From 7cafa7c598988b721015e776fab00160e7b449f7 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi

[Garbled hunks against the generated API reference documentation: the patch
documents the new `template` field of the Database spec (the name of the
template from which to create the new database) next to the immutable fields
(encoding, locale, locale provider, LC_COLLATE, LC_CTYPE, ICU_LOCALE,
ICU_RULES) and regenerates the surrounding reference tables for the
barman-cloud, superuser secret, pooler, and managed role sections.]
diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go
index b8875e8a73..1cf32d83a0 100644
--- a/internal/management/controller/database_controller_sql.go
+++ b/internal/management/controller/database_controller_sql.go
@@ -60,6 +60,9 @@ func createDatabase(
if len(obj.Spec.Owner) > 0 {
sqlCreateDatabase += fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize())
}
+ if len(obj.Spec.Template) > 0 {
+ sqlCreateDatabase += fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize())
+ }
if len(obj.Spec.Tablespace) > 0 {
sqlCreateDatabase += fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize())
}
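
Every user-supplied value is spliced into the statement only after passing
through pgx's identifier sanitizer. A minimal standalone sketch of what
`Sanitize` produces (the values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5"
)

func main() {
	// Sanitize wraps each identifier part in double quotes and doubles any
	// embedded quote characters, so user input can never terminate the
	// identifier and inject SQL.
	fmt.Println(pgx.Identifier{"myTemplate"}.Sanitize())  // "myTemplate"
	fmt.Println(pgx.Identifier{`my"tpl`}.Sanitize())      // "my""tpl"
	fmt.Println(pgx.Identifier{"public", "t"}.Sanitize()) // "public"."t"
}
```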
diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go
index 3cfdf123a7..444267b36e 100644
--- a/internal/management/controller/database_controller_sql_test.go
+++ b/internal/management/controller/database_controller_sql_test.go
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
+
package controller
import (
@@ -88,17 +89,18 @@ var _ = Describe("Managed Database SQL", func() {
Context("createDatabase", func() {
It("should create a new Database", func(ctx SpecContext) {
database.Spec.IsTemplate = ptr.To(true)
+ database.Spec.Template = "myTemplate"
database.Spec.Tablespace = "myTablespace"
database.Spec.AllowConnections = ptr.To(true)
database.Spec.ConnectionLimit = ptr.To(-1)
expectedValue := sqlmock.NewResult(0, 1)
expectedQuery := fmt.Sprintf(
- "CREATE DATABASE %s OWNER %s TABLESPACE %s "+
+ "CREATE DATABASE %s OWNER %s TEMPLATE %s TABLESPACE %s "+
"ALLOW_CONNECTIONS %t CONNECTION LIMIT %d IS_TEMPLATE %t",
pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
- pgx.Identifier{database.Spec.Tablespace}.Sanitize(), *database.Spec.AllowConnections,
- *database.Spec.ConnectionLimit, *database.Spec.IsTemplate,
+ pgx.Identifier{database.Spec.Template}.Sanitize(), pgx.Identifier{database.Spec.Tablespace}.Sanitize(),
+ *database.Spec.AllowConnections, *database.Spec.ConnectionLimit, *database.Spec.IsTemplate,
)
dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue)
From e12d51ece45ec260a73233016a763e258472bfcf Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi

[Garbled hunks against the generated API reference documentation: the patch
adds the new Database spec fields `encoding`, `locale`, `locale_provider`,
`lc_collate`, `lc_ctype`, `icu_locale`, and `icu_rules` to the reference
tables.]
diff --git a/docs/src/samples/database-example-icu.yaml b/docs/src/samples/database-example-icu.yaml
new file mode 100644
index 0000000000..7a6bba7e4d
--- /dev/null
+++ b/docs/src/samples/database-example-icu.yaml
@@ -0,0 +1,16 @@
+# NOTE: this manifest will only work properly if the Postgres version supports
+# ICU locales and rules (version 16 and newer)
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+ name: db-icu
+spec:
+ name: declarative-icu
+ owner: app
+ encoding: UTF8
+ locale_provider: icu
+ icu_locale: en
+ icu_rules: fr
+ template: template0
+ cluster:
+ name: cluster-example
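
Assuming a `cluster-example` cluster is already running in the target
namespace, the sample can be applied with
`kubectl apply -f database-example-icu.yaml`; the operator then reconciles the
Database object against the cluster's primary instance.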
diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go
index 1cf32d83a0..cd01a4f926 100644
--- a/internal/management/controller/database_controller_sql.go
+++ b/internal/management/controller/database_controller_sql.go
@@ -20,7 +20,9 @@ import (
"context"
"database/sql"
"fmt"
+ "strings"
+ "github.com/cloudnative-pg/machinery/pkg/log"
"github.com/jackc/pgx/v5"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -56,27 +58,54 @@ func createDatabase(
db *sql.DB,
obj *apiv1.Database,
) error {
- sqlCreateDatabase := fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize())
+ var sqlCreateDatabase strings.Builder
+ sqlCreateDatabase.WriteString(fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize()))
if len(obj.Spec.Owner) > 0 {
- sqlCreateDatabase += fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize())
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize()))
}
if len(obj.Spec.Template) > 0 {
- sqlCreateDatabase += fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize())
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize()))
}
if len(obj.Spec.Tablespace) > 0 {
- sqlCreateDatabase += fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize())
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize()))
}
if obj.Spec.AllowConnections != nil {
- sqlCreateDatabase += fmt.Sprintf(" ALLOW_CONNECTIONS %v", *obj.Spec.AllowConnections)
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" ALLOW_CONNECTIONS %v", *obj.Spec.AllowConnections))
}
if obj.Spec.ConnectionLimit != nil {
- sqlCreateDatabase += fmt.Sprintf(" CONNECTION LIMIT %v", *obj.Spec.ConnectionLimit)
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" CONNECTION LIMIT %v", *obj.Spec.ConnectionLimit))
}
if obj.Spec.IsTemplate != nil {
- sqlCreateDatabase += fmt.Sprintf(" IS_TEMPLATE %v", *obj.Spec.IsTemplate)
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" IS_TEMPLATE %v", *obj.Spec.IsTemplate))
+ }
+ if obj.Spec.Encoding != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" ENCODING %s", pgx.Identifier{obj.Spec.Encoding}.Sanitize()))
+ }
+ if obj.Spec.Locale != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE %s", pgx.Identifier{obj.Spec.Locale}.Sanitize()))
+ }
+ if obj.Spec.LocaleProvider != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE_PROVIDER %s", pgx.Identifier{obj.Spec.LocaleProvider}.Sanitize()))
+ }
+ if obj.Spec.LcCollate != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" LC_COLLATE %s", pgx.Identifier{obj.Spec.LcCollate}.Sanitize()))
+ }
+ if obj.Spec.LcCtype != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" LC_CTYPE %s", pgx.Identifier{obj.Spec.LcCtype}.Sanitize()))
+ }
+ if obj.Spec.IcuLocale != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_LOCALE %s", pgx.Identifier{obj.Spec.IcuLocale}.Sanitize()))
+ }
+ if obj.Spec.IcuRules != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize()))
}
- _, err := db.ExecContext(ctx, sqlCreateDatabase)
+ contextLogger, ctx := log.SetupLogger(ctx)
+
+ _, err := db.ExecContext(ctx, sqlCreateDatabase.String())
+ if err != nil {
+ contextLogger.Error(err, "while creating database", "query", sqlCreateDatabase.String())
+ }
return err
}
@@ -86,6 +115,8 @@ func updateDatabase(
db *sql.DB,
obj *apiv1.Database,
) error {
+ contextLogger, ctx := log.SetupLogger(ctx)
+
if obj.Spec.AllowConnections != nil {
changeAllowConnectionsSQL := fmt.Sprintf(
"ALTER DATABASE %s WITH ALLOW_CONNECTIONS %v",
@@ -93,6 +124,7 @@ func updateDatabase(
*obj.Spec.AllowConnections)
if _, err := db.ExecContext(ctx, changeAllowConnectionsSQL); err != nil {
+ contextLogger.Error(err, "while altering database", "query", changeAllowConnectionsSQL)
return fmt.Errorf("while altering database %q with allow_connections %t: %w",
obj.Spec.Name, *obj.Spec.AllowConnections, err)
}
@@ -105,6 +137,7 @@ func updateDatabase(
*obj.Spec.ConnectionLimit)
if _, err := db.ExecContext(ctx, changeConnectionsLimitSQL); err != nil {
+ contextLogger.Error(err, "while altering database", "query", changeConnectionsLimitSQL)
return fmt.Errorf("while altering database %q with connection limit %d: %w",
obj.Spec.Name, *obj.Spec.ConnectionLimit, err)
}
@@ -117,6 +150,7 @@ func updateDatabase(
*obj.Spec.IsTemplate)
if _, err := db.ExecContext(ctx, changeIsTemplateSQL); err != nil {
+ contextLogger.Error(err, "while altering database", "query", changeIsTemplateSQL)
return fmt.Errorf("while altering database %q with is_template %t: %w",
obj.Spec.Name, *obj.Spec.IsTemplate, err)
}
@@ -129,6 +163,7 @@ func updateDatabase(
pgx.Identifier{obj.Spec.Owner}.Sanitize())
if _, err := db.ExecContext(ctx, changeOwnerSQL); err != nil {
+ contextLogger.Error(err, "while altering database", "query", changeOwnerSQL)
return fmt.Errorf("while altering database %q owner %s to: %w",
obj.Spec.Name, obj.Spec.Owner, err)
}
@@ -141,6 +176,7 @@ func updateDatabase(
pgx.Identifier{obj.Spec.Tablespace}.Sanitize())
if _, err := db.ExecContext(ctx, changeTablespaceSQL); err != nil {
+ contextLogger.Error(err, "while altering database", "query", changeTablespaceSQL)
return fmt.Errorf("while altering database %q tablespace %s: %w",
obj.Spec.Name, obj.Spec.Tablespace, err)
}
@@ -154,11 +190,13 @@ func dropDatabase(
db *sql.DB,
obj *apiv1.Database,
) error {
+ contextLogger, ctx := log.SetupLogger(ctx)
+ query := fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize())
_, err := db.ExecContext(
ctx,
- fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize()),
- )
+ query)
if err != nil {
+ contextLogger.Error(err, "while dropping database", "query", query)
return fmt.Errorf("while dropping database %q: %w", obj.Spec.Name, err)
}
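
Taken together, for the spec in `database-example-icu.yaml` the builder above
emits a statement along these lines. The following is a standalone sketch
mirroring that logic, not the controller code itself (note the doubled space
after the database name, inherited from the trailing space in the first
`WriteString`):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/jackc/pgx/v5"
)

func main() {
	// Mirrors the builder logic above for the database-example-icu.yaml spec.
	var b strings.Builder
	b.WriteString(fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{"declarative-icu"}.Sanitize()))
	b.WriteString(fmt.Sprintf(" OWNER %s", pgx.Identifier{"app"}.Sanitize()))
	b.WriteString(fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{"template0"}.Sanitize()))
	b.WriteString(fmt.Sprintf(" ENCODING %s", pgx.Identifier{"UTF8"}.Sanitize()))
	b.WriteString(fmt.Sprintf(" LOCALE_PROVIDER %s", pgx.Identifier{"icu"}.Sanitize()))
	b.WriteString(fmt.Sprintf(" ICU_LOCALE %s", pgx.Identifier{"en"}.Sanitize()))
	b.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{"fr"}.Sanitize()))
	fmt.Println(b.String())
	// CREATE DATABASE "declarative-icu"  OWNER "app" TEMPLATE "template0"
	// ENCODING "UTF8" LOCALE_PROVIDER "icu" ICU_LOCALE "en" ICU_RULES "fr"
}
```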
diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go
index 444267b36e..b95a13e076 100644
--- a/internal/management/controller/database_controller_sql_test.go
+++ b/internal/management/controller/database_controller_sql_test.go
@@ -107,6 +107,32 @@ var _ = Describe("Managed Database SQL", func() {
err = createDatabase(ctx, db, database)
Expect(err).ToNot(HaveOccurred())
})
+
+ It("should create a new Database with locale and encoding kind fields", func(ctx SpecContext) {
+ database.Spec.Locale = "POSIX"
+ database.Spec.LocaleProvider = "icu"
+ database.Spec.LcCtype = "en_US.utf8"
+ database.Spec.LcCollate = "C"
+ database.Spec.Encoding = "LATIN1"
+ database.Spec.IcuLocale = "en"
+ database.Spec.IcuRules = "fr"
+
+ expectedValue := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE DATABASE %s OWNER %s "+
+ "ENCODING %s LOCALE %s LOCALE_PROVIDER %s LC_COLLATE %s LC_CTYPE %s "+
+ "ICU_LOCALE %s ICU_RULES %s",
+ pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
+ pgx.Identifier{database.Spec.Encoding}.Sanitize(), pgx.Identifier{database.Spec.Locale}.Sanitize(),
+ pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.LcCollate}.Sanitize(),
+ pgx.Identifier{database.Spec.LcCtype}.Sanitize(),
+ pgx.Identifier{database.Spec.IcuLocale}.Sanitize(), pgx.Identifier{database.Spec.IcuRules}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue)
+
+ err = createDatabase(ctx, db, database)
+ Expect(err).ToNot(HaveOccurred())
+ })
})
Context("updateDatabase", func() {
diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go
index ee9f59942a..6861d17a33 100644
--- a/tests/e2e/declarative_database_management_test.go
+++ b/tests/e2e/declarative_database_management_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
+ "fmt"
"time"
"k8s.io/apimachinery/pkg/types"
@@ -48,16 +49,15 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke
Context("plain vanilla cluster", Ordered, func() {
const (
namespacePrefix = "declarative-db"
- databaseCrdName = "db-declarative"
dbname = "declarative"
)
var (
- clusterName, namespace string
- database *apiv1.Database
+ clusterName, namespace, databaseObjectName string
+ database *apiv1.Database
+ err error
)
BeforeAll(func() {
- var err error
// Create a cluster in a namespace we'll delete after the test
namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
@@ -88,11 +88,28 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke
}, 300).Should(Succeed())
}
+ assertDatabaseHasExpectedFields := func(namespace, primaryPod string, db apiv1.Database) {
+ query := fmt.Sprintf("select count(*) from pg_database where datname = '%s' "+
+ "and encoding = %s and datctype = '%s' and datcollate = '%s'",
+ db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate)
+ Eventually(func(g Gomega) {
+ stdout, _, err := env.ExecQueryInInstancePod(
+ utils.PodLocator{
+ Namespace: namespace,
+ PodName: primaryPod,
+ },
+ "postgres",
+ query)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(stdout).Should(ContainSubstring("1"))
+ }, 30).Should(Succeed())
+ }
+
When("Database CRD reclaim policy is set to retain (default) inside spec", func() {
It("can add a declarative database", func() {
By("applying Database CRD manifest", func() {
CreateResourceFromFile(namespace, databaseManifest)
- _, err := env.GetResourceNameFromYAML(databaseManifest)
+ databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest)
Expect(err).NotTo(HaveOccurred())
})
By("ensuring the Database CRD succeeded reconciliation", func() {
@@ -100,7 +117,7 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke
database = &apiv1.Database{}
databaseNamespacedName := types.NamespacedName{
Namespace: namespace,
- Name: databaseCrdName,
+ Name: databaseObjectName,
}
Eventually(func(g Gomega) {
@@ -110,11 +127,25 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke
}, 300).WithPolling(10 * time.Second).Should(Succeed())
})
- By("verifying new database has been added", func() {
+ By("verifying new database has been created with the expected fields", func() {
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
assertDatabaseExists(namespace, primaryPodInfo.Name, dbname, true)
+
+ // NOTE: the `pg_database` table in Postgres does not contain fields
+ // for the owner nor the template.
+ // Its fields are dependent on the version of Postgres, so we pick
+ // a subset that is available to check even on PG v12
+ expectedDatabaseFields := apiv1.Database{
+ Spec: apiv1.DatabaseSpec{
+ Name: "declarative",
+ LcCtype: "en_US.utf8",
+ LcCollate: "C", // this is the default value
+ Encoding: "0", // corresponds to SQL_ASCII
+ },
+ }
+ assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, expectedDatabaseFields)
})
})
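
The assertion above boils down to a single catalog lookup. A minimal
standalone sketch of the same query run directly over `database/sql` (the DSN
is a placeholder), using bind parameters rather than string interpolation:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver
)

func main() {
	// Placeholder DSN: point it at the cluster's primary instance.
	db, err := sql.Open("pgx", "postgres://postgres@localhost:5432/postgres")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var count int
	err = db.QueryRow(
		`SELECT count(*) FROM pg_database
		 WHERE datname = $1 AND datctype = $2 AND datcollate = $3`,
		"declarative", "en_US.utf8", "C",
	).Scan(&count)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matching databases:", count) // expect 1 once reconciled
}
```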
diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template
index afa83d0ccd..3ded03c50a 100644
--- a/tests/e2e/fixtures/declarative_databases/database.yaml.template
+++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template
@@ -5,5 +5,8 @@ metadata:
spec:
name: declarative
owner: app
+ lc_ctype: "en_US.utf8"
+ encoding: SQL_ASCII
+ template: template0
cluster:
name: cluster-with-declarative-databases
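
Note that `template: template0` in this fixture is not incidental: PostgreSQL
only permits an encoding or locale that differs from the source template's
when the new database is copied from `template0`, which is why both this
fixture and the ICU sample above select it.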
From d0a14c66a76f35fd45eb08096e07ee2bb7aeebfc Mon Sep 17 00:00:00 2001
From: Gabriele Quaresima

[Garbled hunks against the generated API reference documentation: the patch
adds the new Database spec fields `builtin_locale` and `collation_version` and
reflows the cross-package type links (LocalObjectReference, SecretKeySelector,
ConfigMapKeySelector, BarmanObjectStoreConfiguration, BarmanCredentials)
throughout the reference tables.]
diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go
index de8eaebdf3..b55ac5a659 100644
--- a/internal/management/controller/database_controller_sql.go
+++ b/internal/management/controller/database_controller_sql.go
@@ -101,6 +101,13 @@ func createDatabase(
if obj.Spec.IcuRules != "" {
sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize()))
}
+ if obj.Spec.BuiltinLocale != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize()))
+ }
+ if obj.Spec.CollationVersion != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" COLLATION_VERSION %s",
+ pgx.Identifier{obj.Spec.CollationVersion}.Sanitize()))
+ }
_, err := db.ExecContext(ctx, sqlCreateDatabase.String())
if err != nil {
diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go
index b95a13e076..25a697db47 100644
--- a/internal/management/controller/database_controller_sql_test.go
+++ b/internal/management/controller/database_controller_sql_test.go
@@ -133,6 +133,25 @@ var _ = Describe("Managed Database SQL", func() {
err = createDatabase(ctx, db, database)
Expect(err).ToNot(HaveOccurred())
})
+
+ It("should create a new Database with builtin locale", func(ctx SpecContext) {
+ database.Spec.LocaleProvider = "builtin"
+ database.Spec.BuiltinLocale = "C"
+ database.Spec.CollationVersion = "1.2.3"
+
+ expectedValue := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE DATABASE %s OWNER %s "+
+ "LOCALE_PROVIDER %s BUILTIN_LOCALE %s COLLATION_VERSION %s",
+ pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
+ pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(),
+ pgx.Identifier{database.Spec.CollationVersion}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue)
+
+ err = createDatabase(ctx, db, database)
+ Expect(err).ToNot(HaveOccurred())
+ })
})
Context("updateDatabase", func() {
From 19f941b755a2390b2731c641d915ddff666bf306 Mon Sep 17 00:00:00 2001
From: Gabriele Fedi <91485518+GabriFedi97@users.noreply.github.com>
Date: Thu, 10 Oct 2024 17:07:20 +0200
Subject: [PATCH 055/836] fix(cnpg-i): ensure instance manager invokes only the
available plugins (#5651)
The instance manager should try to load only the available plugins, as some
of those declared in the Cluster spec might be available only to the
operator.
closes #5648
---------
Signed-off-by: Gabriele Fedi
diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go
index f2a7a211ba..131438247a 100644
--- a/internal/cnpi/plugin/client/backup.go
+++ b/internal/cnpi/plugin/client/backup.go
@@ -86,6 +86,8 @@ type BackupResponse struct {
// This field is set to true for online/hot backups and to false otherwise.
Online bool
+
+ Metadata map[string]string
}
func (data *data) Backup(
@@ -162,5 +164,6 @@ func (data *data) Backup(
TablespaceMapFile: result.TablespaceMapFile,
InstanceID: result.InstanceId,
Online: result.Online,
+ Metadata: result.Metadata,
}, nil
}
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
index d0cc5043eb..5d1ad1562b 100644
--- a/pkg/management/postgres/webserver/plugin_backup.go
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -137,6 +137,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
b.Backup.Status.BackupLabelFile = response.BackupLabelFile
b.Backup.Status.TablespaceMapFile = response.TablespaceMapFile
b.Backup.Status.Online = ptr.To(response.Online)
+ b.Backup.Status.PluginMetadata = response.Metadata
if !response.StartedAt.IsZero() {
b.Backup.Status.StartedAt = ptr.To(metav1.NewTime(response.StartedAt))
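
Once copied into the status, the plugin metadata is a plain
`map[string]string`. A hypothetical consumer (the helper name is illustrative,
not part of the codebase):

```go
package main

import (
	"fmt"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// printPluginMetadata dumps the metadata that invokeStart copies from the
// plugin's BackupResponse into the Backup status.
func printPluginMetadata(backup *apiv1.Backup) {
	for k, v := range backup.Status.PluginMetadata {
		fmt.Printf("%s=%s\n", k, v)
	}
}

func main() {
	b := &apiv1.Backup{}
	b.Status.PluginMetadata = map[string]string{"provider": "example"}
	printPluginMetadata(b)
}
```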
From ebe20200dee0bd8ed357ab459b6e5fdc30d12855 Mon Sep 17 00:00:00 2001
From: Jonas Kalderstam
Date: Tue, 15 Oct 2024 10:13:24 +0200
Subject: [PATCH 067/836] fix: set `TMPDIR` and `PSQL_HISTORY` environment
variables (#5503)
Ensure the `TMPDIR` environment variable is correctly set to
`/controller/tmp` for temporary files. Additionally, define the
`PSQL_HISTORY` variable as `/controller/tmp/.psql_history` to store
PostgreSQL command history in a controlled location.
This addresses issues with file management during execution and improves the
isolation of temporary files and session history.
Closes #5420
Closes #4137
Signed-off-by: Balthazar Rouberol
Signed-off-by: Leonardo Cecchi
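
A minimal sketch of the environment this commit describes; plain `os.Setenv`
calls stand in for the instance manager's actual pod-environment plumbing:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Illustrative only: route temporary files and psql history into the
	// controller-managed directory, as the commit message describes.
	_ = os.Setenv("TMPDIR", "/controller/tmp")
	_ = os.Setenv("PSQL_HISTORY", "/controller/tmp/.psql_history")
	fmt.Println(os.Getenv("TMPDIR"), os.Getenv("PSQL_HISTORY"))
}
```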
- Manage certificates.
- Make a rollout restart cluster to apply configuration changes.
- Make a reconciliation loop to reload and apply configuration changes.
- For more information, please see [`cnpg` plugin](kubectl-plugin.md) documentation.
-
Get PostgreSQL container image version:
```shell
From 8cc2dc9fcd692d08e534615e701ac13585d76140 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash number of which this operator running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions
+ type: object
+ type: object
+ currentPrimary:
+ description: Current primary instance
+ type: string
+ currentPrimaryFailingSinceTimestamp:
+ description: |-
+ The timestamp when the primary was detected to be unhealthy
+ This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+ type: string
+ currentPrimaryTimestamp:
+ description: The timestamp when the last actual promotion to primary
+ has occurred
+ type: string
+ danglingPVC:
+ description: |-
+ List of all the PVCs created by this cluster and still available
+ which are not attached to a Pod
+ items:
+ type: string
+ type: array
+ demotionToken:
+ description: |-
+ DemotionToken is a JSON token containing the information
+ from pg_controldata such as Database system identifier, Latest checkpoint's
+ TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+ WAL file, and Time of latest checkpoint
+ type: string
+ firstRecoverabilityPoint:
+ description: |-
+ The first recoverability point, stored as a date in RFC3339 format.
+ This field is calculated from the content of FirstRecoverabilityPointByMethod
+ type: string
+ firstRecoverabilityPointByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: The first recoverability point, stored as a date in RFC3339
+ format, per backup method type
+ type: object
+ healthyPVC:
+ description: List of all the PVCs not dangling nor initializing
+ items:
+ type: string
+ type: array
+ image:
+ description: Image contains the image name used by the pods
+ type: string
+ initializingPVC:
+ description: List of all the PVCs that are being initialized by this
+ cluster
+ items:
+ type: string
+ type: array
+ instanceNames:
+ description: List of instance names in the cluster
+ items:
+ type: string
+ type: array
+ instances:
+ description: The total number of PVC Groups detected in the cluster.
+ It may differ from the number of existing instance pods.
+ type: integer
+ instancesReportedState:
+ additionalProperties:
+ description: InstanceReportedState describes the last reported state
+ of an instance during a reconciliation loop
+ properties:
+ isPrimary:
+ description: indicates if an instance is the primary one
+ type: boolean
+ timeLineID:
+ description: indicates on which TimelineId the instance is
+ type: integer
+ required:
+ - isPrimary
+ type: object
+ description: The reported state of the instances during the last reconciliation
+ loop
+ type: object
+ instancesStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: InstancesStatus indicates in which status the instances
+ are
+ type: object
+ jobCount:
+ description: How many Jobs have been created by this cluster
+ format: int32
+ type: integer
+ lastFailedBackup:
+ description: Stored as a date in RFC3339 format
+ type: string
+ lastPromotionToken:
+ description: |-
+ LastPromotionToken is the last verified promotion token that
+ was used to promote a replica cluster
+ type: string
+ lastSuccessfulBackup:
+ description: |-
+ Last successful backup, stored as a date in RFC3339 format
+ This field is calculated from the content of LastSuccessfulBackupByMethod
+ type: string
+ lastSuccessfulBackupByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: Last successful backup, stored as a date in RFC3339 format,
+ per backup method type
+ type: object
+ latestGeneratedNode:
+ description: ID of the latest generated node (used to avoid node name
+ clashing)
+ type: integer
+ managedRolesStatus:
+ description: ManagedRolesStatus reports the state of the managed roles
+ in the cluster
+ properties:
+ byStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: ByStatus gives the list of roles in each state
+ type: object
+ cannotReconcile:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: |-
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+ with an explanation of the cause
+ type: object
+ passwordStatus:
+ additionalProperties:
+ description: PasswordState represents the state of the password
+ of a managed RoleConfiguration
+ properties:
+ resourceVersion:
+ description: the resource version of the password secret
+ type: string
+ transactionID:
+ description: the last transaction ID to affect the role
+ definition in PostgreSQL
+ format: int64
+ type: integer
+ type: object
+ description: PasswordStatus gives the last transaction id and
+ password secret version for each managed role
+ type: object
+ type: object
+ onlineUpdateEnabled:
+ description: OnlineUpdateEnabled shows if the online upgrade is enabled
+ inside the cluster
+ type: boolean
+ phase:
+ description: Current phase of the cluster
+ type: string
+ phaseReason:
+ description: Reason for the current phase
+ type: string
+ pluginStatus:
+ description: PluginStatus is the status of the loaded plugins
+ items:
+ description: PluginStatus is the status of a loaded plugin
+ properties:
+ backupCapabilities:
+ description: |-
+ BackupCapabilities are the list of capabilities of the
+ plugin regarding the Backup management
+ items:
+ type: string
+ type: array
+ capabilities:
+ description: |-
+ Capabilities are the list of capabilities of the
+ plugin
+ items:
+ type: string
+ type: array
+ name:
+ description: Name is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contain the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance, this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extract. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.4
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.4
+ name: poolers.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Pooler
+ listKind: PoolerList
+ plural: poolers
+ singular: pooler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.type
+ name: Type
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Pooler is the Schema for the poolers API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the Pooler.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: |-
+ This is the cluster reference on which the Pooler will work.
+ Pooler name should never match with any cluster name within the same namespace.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ deploymentStrategy:
+ description: The deployment strategy to use for pgbouncer to replace
+ existing pods with new ones
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if DeploymentStrategyType =
+ RollingUpdate.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be scheduled above the desired number of
+ pods.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up.
+ Defaults to 25%.
+ Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ the rolling update starts, such that the total number of old and new pods do not exceed
+ 130% of desired pods. Once old pods have been killed,
+ new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ at any time during the update is at most 130% of desired pods.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ Absolute number is calculated from percentage by rounding down.
+ This can not be 0 if MaxSurge is 0.
+ Defaults to 25%.
+ Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ that the total number of pods available at all times during the update is at
+ least 70% of desired pods.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description: Type of deployment. Can be "Recreate" or "RollingUpdate".
+ Default is RollingUpdate.
+ type: string
+ type: object
+ instances:
+ default: 1
+ description: 'The number of replicas we want. Default: 1.'
+ format: int32
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this pooler.
+ properties:
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
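+ # A minimal sketch of how the relabeling fields above are typically used
+ # (field paths taken from the schema; the label names are hypothetical):
+ #
+ #   monitoring:
+ #     podMonitorRelabelings:
+ #       - action: Replace
+ #         sourceLabels: [__meta_kubernetes_pod_name]
+ #         targetLabel: pod_name
+ #     podMonitorMetricRelabelings:
+ #       - action: Drop
+ #         sourceLabels: [__name__]
+ #         regex: "go_.*"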
+ pgbouncer:
+ description: The PgBouncer configuration
+ properties:
+ authQuery:
+ description: |-
+ The query that will be used to download the hash of the password
+ of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+ If specified, an AuthQuerySecret has to be specified as well, and
+ no automatic CNPG Cluster integration will be triggered.
+ type: string
+ authQuerySecret:
+ description: |-
+ The credentials of the user to be used for the authentication
+ query. If specified, an AuthQuery
+ (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+ has to be specified as well, and no automatic CNPG Cluster integration will be triggered.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Additional parameters to be passed to PgBouncer - please check
+ the CNPG documentation for a list of options you can configure
+ type: object
+ paused:
+ default: false
+ description: |-
+ When set to `true`, PgBouncer will disconnect from the PostgreSQL
+ server, first waiting for all queries to complete, and pause all new
+ client connections until this value is set to `false` (default). Internally,
+ the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
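+ # A minimal Pooler sketch using the pgbouncer stanza above (resource and
+ # cluster names are hypothetical; parameters are plain PgBouncer settings):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Pooler
+ #   metadata:
+ #     name: pooler-example-rw
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     pgbouncer:
+ #       poolMode: transaction
+ #       parameters:
+ #         max_client_conn: "1000"
+ #         default_pool_size: "10"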
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, the requested family is available in the cluster,
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains the configurations
+ of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations of Client
+ IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+ Default value is 10800(for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic is
+ distributed to Service endpoints. Implementations can use this field as a
+ hint, but are not required to guarantee strict adherence. If the field is
+ not set, the implementation will apply its default routing strategy. If set
+ to "PreferClose", implementations should prioritize endpoints that are
+ topologically close (e.g., same zone).
+ This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
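+ # Sketch: overriding the generated Service through the template above to
+ # expose it via a LoadBalancer (the annotation key is hypothetical):
+ #
+ #   serviceTemplate:
+ #     metadata:
+ #       annotations:
+ #         service.example.com/internal: "true"
+ #     spec:
+ #       type: LoadBalancer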
+ template:
+ description: The template of the Pod to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
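+ # Sketch of a required node affinity term as accepted by the schema above
+ # (the label key and value are hypothetical):
+ #
+ #   affinity:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #           - matchExpressions:
+ #               - key: node-role.example.com/pooler
+ #                 operator: In
+ #                 values: ["true"]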
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: set the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately after
+ creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`).
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'.
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots.
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza.
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
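+ # Sketch: a hot backup that takes an immediate checkpoint but does not wait
+ # for WAL archiving (useful only if WAL archiving is monitored independently):
+ #
+ #   online: true
+ #   onlineConfiguration:
+ #     immediateCheckpoint: true
+ #     waitForArchive: false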
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: Whether this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
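+ # A minimal ScheduledBackup sketch against the spec above; note the
+ # six-field cron expression with a leading seconds specifier
+ # (resource and cluster names are hypothetical):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ScheduledBackup
+ #   metadata:
+ #     name: backup-example
+ #   spec:
+ #     schedule: "0 0 0 * * *"   # every day at midnight
+ #     backupOwnerReference: self
+ #     cluster:
+ #       name: cluster-example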
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information about the last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: The next time a backup will run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ - pods
+ - pods/exec
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - podmonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups
+ - clusters
+ - poolers
+ - scheduledbackups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups/status
+ - scheduledbackups/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusterimagecatalogs
+ - imagecatalogs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/finalizers
+ - poolers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/status
+ - poolers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ - roles
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cnpg-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cnpg-manager
+subjects:
+- kind: ServiceAccount
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: v1
+data:
+ queries: |
+ backends:
+ query: |
+ SELECT sa.datname
+ , sa.usename
+ , sa.application_name
+ , states.state
+ , COALESCE(sa.count, 0) AS total
+ , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
+ FROM ( VALUES ('active')
+ , ('idle')
+ , ('idle in transaction')
+ , ('idle in transaction (aborted)')
+ , ('fastpath function call')
+ , ('disabled')
+ ) AS states(state)
+ LEFT JOIN (
+ SELECT datname
+ , state
+ , usename
+ , COALESCE(application_name, '') AS application_name
+ , COUNT(*)
+ , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
+ FROM pg_catalog.pg_stat_activity
+ GROUP BY datname, state, usename, application_name
+ ) sa ON states.state = sa.state
+ WHERE sa.usename IS NOT NULL
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - usename:
+ usage: "LABEL"
+ description: "Name of the user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - state:
+ usage: "LABEL"
+ description: "State of the backend"
+ - total:
+ usage: "GAUGE"
+ description: "Number of backends"
+ - max_tx_duration_seconds:
+ usage: "GAUGE"
+ description: "Maximum duration of a transaction in seconds"
+
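+ # The query below self-joins pg_locks to pair each ungranted (blocked) lock
+ # with a granted lock on the same object held by a different pid; IS NOT
+ # DISTINCT FROM is used so that NULL lock fields still compare as equal.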
+ backends_waiting:
+ query: |
+ SELECT count(*) AS total
+ FROM pg_catalog.pg_locks blocked_locks
+ JOIN pg_catalog.pg_locks blocking_locks
+ ON blocking_locks.locktype = blocked_locks.locktype
+ AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
+ AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
+ AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
+ AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
+ AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
+ AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
+ AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
+ AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
+ AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
+ AND blocking_locks.pid != blocked_locks.pid
+ JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
+ WHERE NOT blocked_locks.granted
+ metrics:
+ - total:
+ usage: "GAUGE"
+ description: "Total number of backends that are currently waiting on other queries"
+
+ pg_database:
+ query: |
+ SELECT datname
+ , pg_catalog.pg_database_size(datname) AS size_bytes
+ , pg_catalog.age(datfrozenxid) AS xid_age
+ , pg_catalog.mxid_age(datminmxid) AS mxid_age
+ FROM pg_catalog.pg_database
+ WHERE datallowconn
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - size_bytes:
+ usage: "GAUGE"
+ description: "Disk space used by the database"
+ - xid_age:
+ usage: "GAUGE"
+ description: "Number of transactions from the frozen XID to the current one"
+ - mxid_age:
+ usage: "GAUGE"
+ description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
+
+ pg_postmaster:
+ query: |
+ SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
+ FROM pg_catalog.pg_postmaster_start_time()
+ metrics:
+ - start_time:
+ usage: "GAUGE"
+ description: "Time at which postgres started (based on epoch)"
+
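+ # Lag semantics below: 0 on a primary, or on a standby that has replayed
+ # everything it has received; otherwise the seconds elapsed since the last
+ # replayed transaction, clamped to >= 0 with GREATEST.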
+ pg_replication:
+ query: "SELECT CASE WHEN (
+ NOT pg_catalog.pg_is_in_recovery()
+ OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_catalog.pg_is_in_recovery() AS in_recovery,
+ EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
+ (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
+ metrics:
+ - lag:
+ usage: "GAUGE"
+ description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
+ - is_wal_receiver_up:
+ usage: "GAUGE"
+ description: "Whether the instance wal_receiver is up"
+ - streaming_replicas:
+ usage: "GAUGE"
+ description: "Number of streaming replicas connected to the instance"
+
+ pg_replication_slots:
+ query: |
+ SELECT slot_name,
+ slot_type,
+ database,
+ active,
+ (CASE pg_catalog.pg_is_in_recovery()
+ WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
+ ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ END) as pg_wal_lsn_diff
+ FROM pg_catalog.pg_replication_slots
+ WHERE NOT temporary
+ metrics:
+ - slot_name:
+ usage: "LABEL"
+ description: "Name of the replication slot"
+ - slot_type:
+ usage: "LABEL"
+ description: "Type of the replication slot"
+ - database:
+ usage: "LABEL"
+ description: "Name of the database"
+ - active:
+ usage: "GAUGE"
+ description: "Flag indicating whether the slot is active"
+ - pg_wal_lsn_diff:
+ usage: "GAUGE"
+ description: "Replication lag in bytes"
+
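+ # The CAST('x' || right(split_part(wal, '.', 1), 16) AS bit(64)) expressions
+ # below decode a WAL segment file name: split_part strips a possible
+ # ".backup" suffix, right(..., 16) keeps the log and segment hex digits
+ # (dropping the timeline), and the hex -> bit(64) -> int8 cast turns them
+ # into one comparable integer; COALESCE maps NULL to -1.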
+ pg_stat_archiver:
+ query: |
+ SELECT archived_count
+ , failed_count
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
+ , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
+ , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_archiver
+ metrics:
+ - archived_count:
+ usage: "COUNTER"
+ description: "Number of WAL files that have been successfully archived"
+ - failed_count:
+ usage: "COUNTER"
+ description: "Number of failed attempts for archiving WAL files"
+ - seconds_since_last_archival:
+ usage: "GAUGE"
+ description: "Seconds since the last successful archival operation"
+ - seconds_since_last_failure:
+ usage: "GAUGE"
+ description: "Seconds since the last failed archival operation"
+ - last_archived_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving succeeded"
+ - last_failed_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving failed"
+ - last_archived_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Archived WAL start LSN"
+ - last_failed_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Last failed WAL LSN"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_bgwriter:
+ runonserver: "<17.0.0"
+ query: |
+ SELECT checkpoints_timed
+ , checkpoints_req
+ , checkpoint_write_time
+ , checkpoint_sync_time
+ , buffers_checkpoint
+ , buffers_clean
+ , maxwritten_clean
+ , buffers_backend
+ , buffers_backend_fsync
+ , buffers_alloc
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - checkpoint_write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
+ - checkpoint_sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
+ - buffers_checkpoint:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints"
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_backend:
+ usage: "COUNTER"
+ description: "Number of buffers written directly by a backend"
+ - buffers_backend_fsync:
+ usage: "COUNTER"
+ description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+
+ pg_stat_bgwriter_17:
+ runonserver: ">=17.0.0"
+ name: pg_stat_bgwriter
+ query: |
+ SELECT buffers_clean
+ , maxwritten_clean
+ , buffers_alloc
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_checkpointer:
+ runonserver: ">=17.0.0"
+ query: |
+ SELECT num_timed AS checkpoints_timed
+ , num_requested AS checkpoints_req
+ , restartpoints_timed
+ , restartpoints_req
+ , restartpoints_done
+ , write_time
+ , sync_time
+ , buffers_written
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_checkpointer
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - restartpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it"
+ - restartpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested restartpoints that have been performed"
+ - restartpoints_done:
+ usage: "COUNTER"
+ description: "Number of restartpoints that have been performed"
+ - write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds"
+ - sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds"
+ - buffers_written:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints and restartpoints"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_database:
+ query: |
+ SELECT datname
+ , xact_commit
+ , xact_rollback
+ , blks_read
+ , blks_hit
+ , tup_returned
+ , tup_fetched
+ , tup_inserted
+ , tup_updated
+ , tup_deleted
+ , conflicts
+ , temp_files
+ , temp_bytes
+ , deadlocks
+ , blk_read_time
+ , blk_write_time
+ FROM pg_catalog.pg_stat_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of this database"
+ - xact_commit:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been committed"
+ - xact_rollback:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been rolled back"
+ - blks_read:
+ usage: "COUNTER"
+ description: "Number of disk blocks read in this database"
+ - blks_hit:
+ usage: "COUNTER"
+ description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
+ - tup_returned:
+ usage: "COUNTER"
+ description: "Number of rows returned by queries in this database"
+ - tup_fetched:
+ usage: "COUNTER"
+ description: "Number of rows fetched by queries in this database"
+ - tup_inserted:
+ usage: "COUNTER"
+ description: "Number of rows inserted by queries in this database"
+ - tup_updated:
+ usage: "COUNTER"
+ description: "Number of rows updated by queries in this database"
+ - tup_deleted:
+ usage: "COUNTER"
+ description: "Number of rows deleted by queries in this database"
+ - conflicts:
+ usage: "COUNTER"
+ description: "Number of queries canceled due to conflicts with recovery in this database"
+ - temp_files:
+ usage: "COUNTER"
+ description: "Number of temporary files created by queries in this database"
+ - temp_bytes:
+ usage: "COUNTER"
+ description: "Total amount of data written to temporary files by queries in this database"
+ - deadlocks:
+ usage: "COUNTER"
+ description: "Number of deadlocks detected in this database"
+ - blk_read_time:
+ usage: "COUNTER"
+ description: "Time spent reading data file blocks by backends in this database, in milliseconds"
+ - blk_write_time:
+ usage: "COUNTER"
+ description: "Time spent writing data file blocks by backends in this database, in milliseconds"
+
+ pg_stat_replication:
+ primary: true
+ query: |
+ SELECT usename
+ , COALESCE(application_name, '') AS application_name
+ , COALESCE(client_addr::text, '') AS client_addr
+ , COALESCE(client_port::text, '') AS client_port
+ , EXTRACT(EPOCH FROM backend_start) AS backend_start
+ , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes
+ , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes
+ , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds
+ FROM pg_catalog.pg_stat_replication
+ metrics:
+ - usename:
+ usage: "LABEL"
+ description: "Name of the replication user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - client_addr:
+ usage: "LABEL"
+ description: "Client IP address"
+ - client_port:
+ usage: "LABEL"
+ description: "Client TCP port"
+ - backend_start:
+ usage: "COUNTER"
+ description: "Time when this process was started"
+ - backend_xmin_age:
+ usage: "COUNTER"
+ description: "The age of this standby's xmin horizon"
+ - sent_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location sent on this connection"
+ - write_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location written to disk by this standby server"
+ - flush_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server"
+ - replay_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server"
+ - write_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it"
+ - flush_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it"
+ - replay_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it"
+
+ pg_settings:
+ query: |
+ SELECT name,
+ CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
+ FROM pg_catalog.pg_settings
+ WHERE vartype IN ('integer', 'real', 'bool')
+ ORDER BY 1
+ metrics:
+ - name:
+ usage: "LABEL"
+ description: "Name of the setting"
+ - setting:
+ usage: "GAUGE"
+ description: "Setting value"
+kind: ConfigMap
+metadata:
+ labels:
+ cnpg.io/reload: ""
+ name: cnpg-default-monitoring
+ namespace: cnpg-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ app.kubernetes.io/name: cloudnative-pg
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-controller-manager
+ namespace: cnpg-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: cloudnative-pg
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ spec:
+ containers:
+ - args:
+ - controller
+ - --leader-elect
+ - --config-map-name=cnpg-controller-manager-config
+ - --secret-name=cnpg-controller-manager-config
+ - --webhook-port=9443
+ command:
+ - /manager
+ env:
+ - name: OPERATOR_IMAGE_NAME
+ value: ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MONITORING_QUERIES_CONFIGMAP
+ value: cnpg-default-monitoring
+ image: ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1
+ livenessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsGroup: 10001
+ runAsUser: 10001
+ seccompProfile:
+ type: RuntimeDefault
+ volumeMounts:
+ - mountPath: /controller
+ name: scratch-data
+ - mountPath: /run/secrets/cnpg.io/webhook
+ name: webhook-certificates
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ serviceAccountName: cnpg-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - emptyDir: {}
+ name: scratch-data
+ - name: webhook-certificates
+ secret:
+ defaultMode: 420
+ optional: true
+ secretName: cnpg-webhook-cert
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: cnpg-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: mbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: mcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: mscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: cnpg-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: vbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: vcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-pooler
+ failurePolicy: Fail
+ name: vpooler.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - poolers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: vscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
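The pg_settings collector in the ConfigMap above folds boolean settings into '1'/'0' via the CASE expression so every row can be exported as a numeric gauge. A minimal sketch exercising the same query with database/sql (connection string and driver choice are illustrative assumptions, not part of the patch):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // any PostgreSQL driver works; lib/pq is assumed here
)

func main() {
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Same query as the pg_settings metric: booleans become '1'/'0' so the
	// exporter can treat every value as a gauge.
	rows, err := db.Query(`SELECT name,
	    CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
	  FROM pg_catalog.pg_settings
	  WHERE vartype IN ('integer', 'real', 'bool')
	  ORDER BY 1`)
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name, setting string
		if err := rows.Scan(&name, &setting); err != nil {
			panic(err)
		}
		fmt.Printf("pg_settings_%s = %s\n", name, setting)
	}
}
```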
From 976b5efe8fd1f01d074abe82d2ed7e167de856a8 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
+
+
## ImageCatalog {#postgresql-cnpg-io-v1-ImageCatalog}
@@ -492,7 +531,7 @@ plugin for this backup
+
+Field Description
+
+apiVersion [Required]
+string
+
+ postgresql.cnpg.io/v1
+
+kind [Required]
+string
+
+ Database
+
+metadata [Required]
+meta/v1.ObjectMeta
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+
+spec [Required]
+DatabaseSpec
+
+
+status
+DatabaseStatus
+
Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE
-tablespaceName [Required]
+tablespaceName

 The backup method being used

-online [Required]
+online

 Whether the backup was online/hot (true) or offline/cold (false)

-pluginMetadata [Required]
+pluginMetadata

-ephemeralVolumesSizeLimit [Required]
+ephemeralVolumesSizeLimit

-plugins [Required]
+plugins

-lastPromotionToken [Required]
+lastPromotionToken

 .spec.failoverDelay is populated or dur

 Image contains the image name used by the pods

-pluginStatus [Required]
+pluginStatus

 Database is the Schema for the databases API
-
-| Field | Description |
-|---|---|
-| metadata [Required] meta/v1.ObjectMeta | No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
-| spec [Required] DatabaseSpec | Specification of the desired Database. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
-| status DatabaseStatus | Most recently observed status of the Database. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
-
-ready [Required]
+ready
bool

 Ready is true if the database was reconciled correctly

-error [Required]
+error
string

Field Description

-shm [Required]
+shm
k8s.io/apimachinery/pkg/api/resource.Quantity

 Shm is the size limit of the shared memory volume

-temporaryData [Required]
+temporaryData
k8s.io/apimachinery/pkg/api/resource.Quantity

@@ -3063,7 +3066,7 @@ It includes the type of service and its associated template specification.
Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.

-updateStrategy [Required]
+updateStrategy
ServiceUpdateStrategy

@@ -3102,7 +3105,7 @@ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
Valid values are "r", and "ro", representing read, and read-only services.

-additional [Required]
+additional
[]ManagedService

@@ -3133,7 +3136,7 @@ not using the core data types.
PluginConfigurationList represents a set of plugins with their
configuration parameters

## PluginStatus {#postgresql-cnpg-io-v1-PluginStatus}

@@ -3507,7 +3525,7 @@ the operator calls PgBouncer's PAUSE and RESUME comman
latest reconciliation loop

-capabilities [Required]
+capabilities
[]string

@@ -3515,7 +3533,7 @@ latest reconciliation loop
plugin

-operatorCapabilities [Required]
+operatorCapabilities
[]string

@@ -3523,7 +3541,7 @@ plugin
plugin regarding the reconciler

-walCapabilities [Required]
+walCapabilities
[]string

@@ -3531,7 +3549,7 @@ plugin regarding the reconciler
plugin regarding the WAL management

-backupCapabilities [Required]
+backupCapabilities
[]string

@@ -3539,7 +3557,7 @@ plugin regarding the WAL management
plugin regarding the Backup management

-status [Required]
+status
string

@@ -4045,7 +4063,7 @@ cluster
The corresponding cluster

+ensure
+EnsureOption
+
+ Ensure the PostgreSQL database is

name [Required]
string

@@ -2441,18 +2448,18 @@ database is not valid
desired state that was synchronized

-ready
+applied
bool

- Ready is true if the database was reconciled correctly
+Applied is true if the database was reconciled correctly

-error
+message
string

- Error is the reconciliation error message
+Message is the reconciliation output message
DataDurabilityLevel specifies how strictly to enforce synchronous replication
+when cluster instances are unavailable. Options are required or preferred.
dataDurability

+If set to "required", data durability is strictly enforced. Write operations
+with synchronous commit settings (on, remote_write, or remote_apply) will
+block if there are insufficient healthy replicas, ensuring data persistence.
+If set to "preferred", data durability is maintained when healthy replicas
+are available, but the required number of instances will adjust dynamically
+if replicas become unavailable. This setting relaxes strict durability enforcement
+to allow for operational continuity. This setting is only applicable if both
+standbyNamesPre and standbyNamesPost are unset (empty).
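
A sketch of how the "preferred" level could be expressed on a cluster spec; the Go type and field names (SynchronousReplicaConfiguration, Number, DataDurability) are assumptions inferred from the documentation text above, not taken from the patch:

```go
package example

import apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"

// preferredDurability sketches a synchronous-replication setup that stays
// writable when replicas are lost, at the cost of strict durability.
func preferredDurability() apiv1.ClusterSpec {
	return apiv1.ClusterSpec{
		PostgresConfiguration: apiv1.PostgresConfiguration{
			Synchronous: &apiv1.SynchronousReplicaConfiguration{
				Number: 1, // replicas kept in synchronous_standby_names
				// "preferred" relaxes enforcement; "required" blocks writes
				// when too few healthy replicas remain.
				DataDurability: apiv1.DataDurabilityLevel("preferred"),
			},
		},
	}
}
```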
switchover) or in-place (restart
externalClusters
-[]ExternalCluster
+ExternalClusterList
The list of external clusters which are used in the configuration
@@ -2558,82 +2558,6 @@ storage
-## ExternalCluster {#postgresql-cnpg-io-v1-ExternalCluster}
-
-
-**Appears in:**
-
-- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec)
-
-
-ExternalCluster represents the connection parameters to an
-external cluster which is used in the other sections of the configuration
-
-
-
-Field Description
-
-name [Required]
-string
-
-
- The server name, required
-
-
-connectionParameters
-map[string]string
-
-
- The list of connection parameters, such as dbname, host, username, etc
-
-
-sslCert
-core/v1.SecretKeySelector
-
-
- The reference to an SSL certificate to be used to connect to this
-instance
-
-
-sslKey
-core/v1.SecretKeySelector
-
-
- The reference to an SSL private key to be used to connect to this
-instance
-
-
-sslRootCert
-core/v1.SecretKeySelector
-
-
- The reference to an SSL CA public key to be used to connect to this
-instance
-
-
-password
-core/v1.SecretKeySelector
-
-
- The reference to the password to be used to connect to the server.
-If a password is provided, CloudNativePG creates a PostgreSQL
-passfile at /controller/external/NAME/pass (where "NAME" is the
-cluster's name). This passfile is automatically referenced in the
-connection string when establishing a connection to the remote
-PostgreSQL server from the current PostgreSQL Cluster. This ensures
-secure and efficient password management for external clusters.
-
-
-barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration
-
-
- The configuration for the barman-cloud tool suite
-
-
-
-
-
## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef}
@@ -3505,6 +3429,44 @@ the operator calls PgBouncer's PAUSE and RESUME comman
+## PluginConfiguration {#postgresql-cnpg-io-v1-PluginConfiguration}
+
+
+**Appears in:**
+
+
+
+PluginConfiguration specifies a plugin that needs to be loaded for this
+cluster to be reconciled
+
+
+
+Field Description
+
+name [Required]
+string
+
+
+ Name is the plugin name
+
+
+enabled
+bool
+
+
+ Enabled is true if this plugin will be used
+
+
+parameters
+map[string]string
+
+
+ Parameters is the configuration of the plugin
+
+
+
+
+
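As a usage sketch (the plugin name and parameters below are invented for illustration, and Enabled is shown as a pointer, which is an assumption about the generated API type):

```go
package example

import apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"

// examplePlugins declares one plugin on a Cluster spec; reconciliation only
// loads plugins whose Enabled flag resolves to true.
func examplePlugins() apiv1.PluginConfigurationList {
	enabled := true
	return apiv1.PluginConfigurationList{
		{
			Name:       "example.cnpg.io/backup", // hypothetical plugin name
			Enabled:    &enabled,
			Parameters: map[string]string{"bucket": "my-backups"},
		},
	}
}
```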
## PluginConfigurationList {#postgresql-cnpg-io-v1-PluginConfigurationList}
(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.PluginConfiguration`)
diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go
index f2d4d63052..26dd30a4d1 100644
--- a/internal/cmd/manager/instance/restore/cmd.go
+++ b/internal/cmd/manager/instance/restore/cmd.go
@@ -95,6 +95,8 @@ func restoreSubCommand(ctx context.Context, info postgres.InitInfo) error {
return err
}
+ contextLogger.Info("restore command execution completed without errors")
+
return nil
}
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index 9ca043ee0e..56bcdf447d 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -255,7 +255,10 @@ func restoreWALViaPlugins(
defer plugins.Close()
availablePluginNamesSet := stringset.From(availablePluginNames)
- enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
+
+ enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
+ enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...)
+ enabledPluginNamesSet := stringset.From(enabledPluginNames)
client, err := pluginClient.WithPlugins(
ctx,
diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go
index 131438247a..54aab9cc4f 100644
--- a/internal/cnpi/plugin/client/backup.go
+++ b/internal/cnpi/plugin/client/backup.go
@@ -87,6 +87,7 @@ type BackupResponse struct {
// This field is set to true for online/hot backups and to false otherwise.
Online bool
+ // This field contains the metadata to be associated with this backup
Metadata map[string]string
}
diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go
index c1b141f0e9..7ecf00960e 100644
--- a/internal/cnpi/plugin/client/contracts.go
+++ b/internal/cnpi/plugin/client/contracts.go
@@ -19,10 +19,12 @@ package client
import (
"context"
+ restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
)
@@ -35,6 +37,7 @@ type Client interface {
LifecycleCapabilities
WalCapabilities
BackupCapabilities
+ RestoreJobHooksCapabilities
}
// Connection describes a set of behaviour needed to properly handle the plugin connections
@@ -144,3 +147,8 @@ type BackupCapabilities interface {
parameters map[string]string,
) (*BackupResponse, error)
}
+
+// RestoreJobHooksCapabilities describes the behaviour needed to run the restore job hooks
+type RestoreJobHooksCapabilities interface {
+ Restore(ctx context.Context, cluster *apiv1.Cluster) (*restore.RestoreResponse, error)
+}
diff --git a/internal/cnpi/plugin/client/restore_job.go b/internal/cnpi/plugin/client/restore_job.go
new file mode 100644
index 0000000000..028c0d3e95
--- /dev/null
+++ b/internal/cnpi/plugin/client/restore_job.go
@@ -0,0 +1,61 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "slices"
+
+ restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+// ErrNoPluginSupportsRestoreJobHooksCapability is raised when no plugin supports the restore job hooks capability
+var ErrNoPluginSupportsRestoreJobHooksCapability = errors.New("no plugin supports the restore job hooks capability")
+
+func (data *data) Restore(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+) (*restore.RestoreResponse, error) {
+ cluster.EnsureGVKIsPresent()
+
+ for idx := range data.plugins {
+ plugin := data.plugins[idx]
+
+ if !slices.Contains(plugin.RestoreJobHooksCapabilities(), restore.RestoreJobHooksCapability_KIND_RESTORE) {
+ continue
+ }
+
+ clusterDefinition, err := json.Marshal(cluster)
+ if err != nil {
+ return nil, err
+ }
+ request := restore.RestoreRequest{
+ ClusterDefinition: clusterDefinition,
+ }
+ res, err := plugin.RestoreJobHooksClient().Restore(ctx, &request)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+ }
+
+ return nil, ErrNoPluginSupportsRestoreJobHooksCapability
+}
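A sketch of how a caller can separate "no plugin implements restore" from a hard failure using the sentinel error above; the helper and its signature are illustrative, while the Client interface, Restore method, and RestoreConfig field come from the diffs in this patch:

```go
package example

import (
	"context"
	"errors"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
)

// restoreConfigFromPlugins returns the plugin-provided recovery configuration,
// or ok=false when no loaded plugin implements the restore job hooks, letting
// the caller fall back to the barman-cloud path.
func restoreConfigFromPlugins(
	ctx context.Context,
	c pluginClient.Client,
	cluster *apiv1.Cluster,
) (config string, ok bool, err error) {
	res, err := c.Restore(ctx, cluster)
	if errors.Is(err, pluginClient.ErrNoPluginSupportsRestoreJobHooksCapability) {
		return "", false, nil
	}
	if err != nil {
		return "", false, err
	}
	return res.RestoreConfig, true, nil
}
```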
diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go
index 490518410f..ba9e9a64db 100644
--- a/internal/cnpi/plugin/client/suite_test.go
+++ b/internal/cnpi/plugin/client/suite_test.go
@@ -25,6 +25,7 @@ import (
"github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
"github.com/cloudnative-pg/cnpg-i/pkg/operator"
"github.com/cloudnative-pg/cnpg-i/pkg/reconciler"
+ restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
"github.com/cloudnative-pg/cnpg-i/pkg/wal"
"google.golang.org/grpc"
@@ -103,6 +104,14 @@ type fakeConnection struct {
operatorClient *fakeOperatorClient
}
+func (f *fakeConnection) RestoreJobHooksClient() restore.RestoreJobHooksClient {
+ panic("implement me")
+}
+
+func (f *fakeConnection) RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind {
+ panic("implement me")
+}
+
func (f *fakeConnection) setStatusResponse(status []byte) {
f.operatorClient.status = &operator.SetStatusInClusterResponse{
JsonStatus: status,
diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go
index 1a8d46e7a9..0e9826d530 100644
--- a/internal/cnpi/plugin/connection/connection.go
+++ b/internal/cnpi/plugin/connection/connection.go
@@ -28,6 +28,7 @@ import (
"github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
"github.com/cloudnative-pg/cnpg-i/pkg/operator"
"github.com/cloudnative-pg/cnpg-i/pkg/reconciler"
+ restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
"github.com/cloudnative-pg/cnpg-i/pkg/wal"
"google.golang.org/grpc"
)
@@ -57,6 +58,7 @@ type Interface interface {
WALClient() wal.WALClient
BackupClient() backup.BackupClient
ReconcilerHooksClient() reconciler.ReconcilerHooksClient
+ RestoreJobHooksClient() restore.RestoreJobHooksClient
PluginCapabilities() []identity.PluginCapability_Service_Type
OperatorCapabilities() []operator.OperatorCapability_RPC_Type
@@ -64,6 +66,7 @@ type Interface interface {
LifecycleCapabilities() []*lifecycle.OperatorLifecycleCapabilities
BackupCapabilities() []backup.BackupCapability_RPC_Type
ReconcilerCapabilities() []reconciler.ReconcilerHooksCapability_Kind
+ RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind
Ping(ctx context.Context) error
Close() error
@@ -77,15 +80,17 @@ type data struct {
walClient wal.WALClient
backupClient backup.BackupClient
reconcilerHooksClient reconciler.ReconcilerHooksClient
-
- name string
- version string
- capabilities []identity.PluginCapability_Service_Type
- operatorCapabilities []operator.OperatorCapability_RPC_Type
- walCapabilities []wal.WALCapability_RPC_Type
- lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities
- backupCapabilities []backup.BackupCapability_RPC_Type
- reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind
+ restoreJobHooksClient restore.RestoreJobHooksClient
+
+ name string
+ version string
+ capabilities []identity.PluginCapability_Service_Type
+ operatorCapabilities []operator.OperatorCapability_RPC_Type
+ walCapabilities []wal.WALCapability_RPC_Type
+ lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities
+ backupCapabilities []backup.BackupCapability_RPC_Type
+ reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind
+ restoreJobHooksCapabilities []restore.RestoreJobHooksCapability_Kind
}
func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, error) {
@@ -102,16 +107,18 @@ func newPluginDataFromConnection(ctx context.Context, connection Handler) (data,
return data{}, fmt.Errorf("while querying plugin identity: %w", err)
}
- result := data{}
- result.connection = connection
- result.name = pluginInfoResponse.Name
- result.version = pluginInfoResponse.Version
- result.identityClient = identity.NewIdentityClient(connection)
- result.operatorClient = operator.NewOperatorClient(connection)
- result.lifecycleClient = lifecycle.NewOperatorLifecycleClient(connection)
- result.walClient = wal.NewWALClient(connection)
- result.backupClient = backup.NewBackupClient(connection)
- result.reconcilerHooksClient = reconciler.NewReconcilerHooksClient(connection)
+ result := data{
+ connection: connection,
+ name: pluginInfoResponse.Name,
+ version: pluginInfoResponse.Version,
+ identityClient: identity.NewIdentityClient(connection),
+ operatorClient: operator.NewOperatorClient(connection),
+ lifecycleClient: lifecycle.NewOperatorLifecycleClient(connection),
+ walClient: wal.NewWALClient(connection),
+ backupClient: backup.NewBackupClient(connection),
+ reconcilerHooksClient: reconciler.NewReconcilerHooksClient(connection),
+ restoreJobHooksClient: restore.NewRestoreJobHooksClient(connection),
+ }
return result, err
}
@@ -232,6 +239,27 @@ func (pluginData *data) loadBackupCapabilities(ctx context.Context) error {
return nil
}
+func (pluginData *data) loadRestoreJobHooksCapabilities(ctx context.Context) error {
+ var restoreJobHooksCapabilitiesResponse *restore.RestoreJobHooksCapabilitiesResult
+ var err error
+
+ if restoreJobHooksCapabilitiesResponse, err = pluginData.restoreJobHooksClient.GetCapabilities(
+ ctx,
+ &restore.RestoreJobHooksCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin restore job hooks capabilities: %w", err)
+ }
+
+ pluginData.restoreJobHooksCapabilities = make(
+ []restore.RestoreJobHooksCapability_Kind,
+ len(restoreJobHooksCapabilitiesResponse.Capabilities))
+ for i := range pluginData.restoreJobHooksCapabilities {
+ pluginData.restoreJobHooksCapabilities[i] = restoreJobHooksCapabilitiesResponse.Capabilities[i].Kind
+ }
+
+ return nil
+}
+
// Metadata extracts the plugin metadata reading from
// the internal metadata
func (pluginData *data) Metadata() Metadata {
@@ -288,6 +316,10 @@ func (pluginData *data) BackupClient() backup.BackupClient {
return pluginData.backupClient
}
+func (pluginData *data) RestoreJobHooksClient() restore.RestoreJobHooksClient {
+ return pluginData.restoreJobHooksClient
+}
+
func (pluginData *data) ReconcilerHooksClient() reconciler.ReconcilerHooksClient {
return pluginData.reconcilerHooksClient
}
@@ -316,6 +348,10 @@ func (pluginData *data) ReconcilerCapabilities() []reconciler.ReconcilerHooksCap
return pluginData.reconcilerCapabilities
}
+func (pluginData *data) RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind {
+ return pluginData.restoreJobHooksCapabilities
+}
+
func (pluginData *data) Ping(ctx context.Context) error {
_, err := pluginData.identityClient.Probe(ctx, &identity.ProbeRequest{})
return err
@@ -374,5 +410,13 @@ func LoadPlugin(ctx context.Context, handler Handler) (Interface, error) {
}
}
+ // If the plugin implements the restore job hooks, load its
+ // capabilities
+ if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_RESTORE_JOB) {
+ if err = result.loadRestoreJobHooksCapabilities(ctx); err != nil {
+ return nil, err
+ }
+ }
+
return &result, nil
}
diff --git a/internal/cnpi/plugin/connection/unix.go b/internal/cnpi/plugin/connection/unix.go
index 93495ba4eb..1a485c179e 100644
--- a/internal/cnpi/plugin/connection/unix.go
+++ b/internal/cnpi/plugin/connection/unix.go
@@ -20,11 +20,14 @@ package connection
import (
"context"
"fmt"
+ "time"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
// ProtocolUnix is for plugins that are reachable over a
@@ -38,11 +41,15 @@ func (p ProtocolUnix) Dial(ctx context.Context) (Handler, error) {
contextLogger.Debug("Connecting to plugin via local socket", "path", dialPath)
+ timeoutValue := defaultTimeout
+ value, ok := ctx.Value(utils.GRPCTimeoutKey).(time.Duration)
+ if ok {
+ contextLogger.Debug("Using custom timeout value", "timeout", value)
+ timeoutValue = value
+ }
+
return grpc.NewClient(
dialPath,
grpc.WithTransportCredentials(insecure.NewCredentials()),
- grpc.WithUnaryInterceptor(
- timeout.UnaryClientInterceptor(defaultTimeout),
- ),
- )
+ grpc.WithUnaryInterceptor(timeout.UnaryClientInterceptor(timeoutValue)))
}
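A sketch of a caller opting into a custom per-RPC timeout before dialing a plugin socket; GRPCTimeoutKey and Dial come from this patch, while constructing ProtocolUnix directly from the socket path is an assumption about its underlying type:

```go
package example

import (
	"context"
	"time"

	"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

// dialWithTimeout overrides the default gRPC timeout for one plugin dial;
// restoreViaPlugin below uses the same context key with 100 minutes.
func dialWithTimeout(ctx context.Context, socketPath string) (connection.Handler, error) {
	ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 30*time.Minute)
	return connection.ProtocolUnix(socketPath).Dial(ctx)
}
```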
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 1c5974cb7d..f433ab5d69 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -173,8 +173,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
ctx = cluster.SetInContext(ctx)
- // Load the required plugins
- pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...)
+ // Load the plugins required to bootstrap and reconcile this cluster
+ enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
+ enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...)
+ pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, enabledPluginNames...)
if err != nil {
var errUnknownPlugin *repository.ErrUnknownPlugin
if errors.As(err, &errUnknownPlugin) {
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index 347c997786..1e43cb6996 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -37,15 +37,20 @@ import (
barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer"
+ restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
"github.com/cloudnative-pg/machinery/pkg/execlog"
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/external"
@@ -228,6 +233,7 @@ func (info InitInfo) createBackupObjectForSnapshotRestore(
// Restore restores a PostgreSQL cluster from a backup into the object storage
func (info InitInfo) Restore(ctx context.Context) error {
+ contextLogger := log.FromContext(ctx)
typedClient, err := management.NewControllerRuntimeClient()
if err != nil {
return err
@@ -248,29 +254,53 @@ func (info InitInfo) Restore(ctx context.Context) error {
info.ApplicationDatabase = cluster.GetApplicationDatabaseName()
}
- // Before starting the restore we check if the archive destination is safe to use
- // otherwise, we stop creating the cluster
- err = info.checkBackupDestination(ctx, typedClient, cluster)
- if err != nil {
- return err
- }
+ var envs []string
+ var config string
- // If we need to download data from a backup, we do it
- backup, env, err := info.loadBackup(ctx, typedClient, cluster)
- if err != nil {
- return err
- }
+ // nolint:nestif
+ if pluginConfiguration := cluster.GetRecoverySourcePlugin(); pluginConfiguration != nil {
+ contextLogger.Info("Restore through plugin detected, proceeding...")
+ res, err := restoreViaPlugin(ctx, cluster, pluginConfiguration)
+ if err != nil {
+ return err
+ }
+ if res == nil {
+ return errors.New("empty response from restoreViaPlugin, programmatic error")
+ }
+ envs = res.Envs
+ config = res.RestoreConfig
+ } else {
+ // Before starting the restore we check if the archive destination is safe to use
+ // otherwise, we stop creating the cluster
+ err = info.checkBackupDestination(ctx, typedClient, cluster)
+ if err != nil {
+ return err
+ }
- if err := info.ensureArchiveContainsLastCheckpointRedoWAL(ctx, cluster, env, backup); err != nil {
- return err
- }
+ // If we need to download data from a backup, we do it
+ backup, env, err := info.loadBackup(ctx, typedClient, cluster)
+ if err != nil {
+ return err
+ }
- if err := info.restoreDataDir(ctx, backup, env); err != nil {
- return err
- }
+ if err := info.ensureArchiveContainsLastCheckpointRedoWAL(ctx, cluster, env, backup); err != nil {
+ return err
+ }
- if _, err := info.restoreCustomWalDir(ctx); err != nil {
- return err
+ if err := info.restoreDataDir(ctx, backup, env); err != nil {
+ return err
+ }
+
+ if _, err := info.restoreCustomWalDir(ctx); err != nil {
+ return err
+ }
+
+ conf, err := getRestoreWalConfig(ctx, backup)
+ if err != nil {
+ return err
+ }
+ config = conf
+ envs = env
}
if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil {
@@ -304,11 +334,11 @@ func (info InitInfo) Restore(ctx context.Context) error {
return err
}
- if err := info.writeRestoreWalConfig(ctx, backup, cluster); err != nil {
+ if err := info.writeCustomRestoreWalConfig(cluster, config); err != nil {
return err
}
- return info.ConfigureInstanceAfterRestore(ctx, cluster, env)
+ return info.ConfigureInstanceAfterRestore(ctx, cluster, envs)
}
func (info InitInfo) ensureArchiveContainsLastCheckpointRedoWAL(
@@ -581,6 +611,33 @@ func (info InitInfo) writeRestoreWalConfig(
backup *apiv1.Backup,
cluster *apiv1.Cluster,
) error {
+ conf, err := getRestoreWalConfig(ctx, backup)
+ if err != nil {
+ return err
+ }
+ recoveryFileContents := fmt.Sprintf(
+ "%s\n"+
+ "%s",
+ conf,
+ cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions())
+
+ return info.writeRecoveryConfiguration(cluster, recoveryFileContents)
+}
+
+func (info InitInfo) writeCustomRestoreWalConfig(cluster *apiv1.Cluster, conf string) error {
+ recoveryFileContents := fmt.Sprintf(
+ "%s\n"+
+ "%s",
+ conf,
+ cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions())
+
+ return info.writeRecoveryConfiguration(cluster, recoveryFileContents)
+}
+
+// getRestoreWalConfig obtains the content to append to `custom.conf` allowing PostgreSQL
+// to complete the WAL recovery from the object storage and then start
+// as a new primary
+func getRestoreWalConfig(ctx context.Context, backup *apiv1.Backup) (string, error) {
var err error
cmd := []string{barmanCapabilities.BarmanCloudWalRestore}
@@ -593,19 +650,17 @@ func (info InitInfo) writeRestoreWalConfig(
cmd, err = barmanCommand.AppendCloudProviderOptionsFromBackup(
ctx, cmd, backup.Status.BarmanCredentials)
if err != nil {
- return err
+ return "", err
}
cmd = append(cmd, "%f", "%p")
recoveryFileContents := fmt.Sprintf(
"recovery_target_action = promote\n"+
- "restore_command = '%s'\n"+
- "%s",
- strings.Join(cmd, " "),
- cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions())
+ "restore_command = '%s'\n",
+ strings.Join(cmd, " "))
- return info.writeRecoveryConfiguration(cluster, recoveryFileContents)
+ return recoveryFileContents, nil
}
func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recoveryFileContents string) error {
@@ -985,3 +1040,39 @@ func waitUntilRecoveryFinishes(db *sql.DB) error {
return nil
})
}
+
+// restoreViaPlugin tries to restore the cluster using a plugin if available and enabled.
+// Returns the plugin's restore response and any error encountered.
+func restoreViaPlugin(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ plugin *apiv1.PluginConfiguration,
+) (*restore.RestoreResponse, error) {
+ contextLogger := log.FromContext(ctx)
+
+ // TODO: timeout should be configurable by the user
+ ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute)
+
+ plugins := repository.New()
+ availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir)
+ if err != nil {
+ contextLogger.Error(err, "Error while loading local plugins")
+ }
+ defer plugins.Close()
+
+ availablePluginNamesSet := stringset.From(availablePluginNames)
+ contextLogger.Info("available plugins", "plugins", availablePluginNamesSet)
+
+ pClient, err := pluginClient.WithPlugins(
+ ctx,
+ plugins,
+ plugin.Name,
+ )
+ if err != nil {
+ contextLogger.Error(err, "Error while loading required plugins")
+ return nil, err
+ }
+ defer pClient.Close(ctx)
+
+ return pClient.Restore(ctx, cluster)
+}
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index f818623c44..7981576c9f 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -23,6 +23,7 @@ import (
"fmt"
"net/http"
"strings"
+ "time"
"github.com/cloudnative-pg/machinery/pkg/log"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +34,7 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
type localWebserverEndpoints struct {
@@ -230,5 +232,7 @@ func (ws *localWebserverEndpoints) startPluginBackup(
cluster *apiv1.Cluster,
backup *apiv1.Backup,
) {
+ // TODO: timeout should be configurable by the user
+ ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute)
NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx)
}
diff --git a/pkg/utils/context.go b/pkg/utils/context.go
index e91aebab61..1f5b25a06e 100644
--- a/pkg/utils/context.go
+++ b/pkg/utils/context.go
@@ -24,3 +24,6 @@ const ContextKeyCluster contextKey = "cluster"
// PluginClientKey is the context key holding cluster data
const PluginClientKey contextKey = "pluginClient"
+
+// GRPCTimeoutKey is the context key holding the gRPC timeout
+const GRPCTimeoutKey contextKey = "grpcTimeout"
From 7331a41a99f87d5446d22d3deb7230b7b3e98c9e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 6 Nov 2024 17:42:01 +0400
Subject: [PATCH 134/836] fix(deps): update kubernetes patches (main) (#6027)
https://github.com/prometheus-operator/prometheus-operator `v0.78.0` -> `v0.78.1`
https://github.com/kubernetes/utils `49e7df5` -> `6fe5fd8`
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index 9de3e882b0..cdb1248b22 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
github.com/mitchellh/go-ps v1.0.0
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.35.1
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
github.com/sethvargo/go-password v0.3.1
@@ -45,7 +45,7 @@ require (
k8s.io/apimachinery v0.31.2
k8s.io/cli-runtime v0.31.2
k8s.io/client-go v0.31.2
- k8s.io/utils v0.0.0-20240921022957-49e7df575cb6
+ k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078
sigs.k8s.io/controller-runtime v0.19.1
sigs.k8s.io/yaml v1.4.0
)
diff --git a/go.sum b/go.sum
index cc512e6223..5fc91d02a5 100644
--- a/go.sum
+++ b/go.sum
@@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0 h1:b2L36QF60oB8Ty97UOCOnN2VnRbT6eaxzYda9kmk9zE=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 h1:Fm9Z+FabnB+6EoGq15j+pyLmaK6hYrYOpBlTzOLTQ+E=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
@@ -292,8 +292,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo=
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
-k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI=
-k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno=
+k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk=
sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
From 6b5da958154db3650240e83e3d182120c1346bee Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Thu, 7 Nov 2024 11:08:50 +0100
Subject: [PATCH 135/836] refactor: bring role reconciler Postgres functions in
line with other reconcilers (#5958)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Use an instance interface rather than a `postgres.Instance` in the
Reconcile, to make it testable
- Rewrite the Postgres-facing functions to take a `sql.DB` parameter
- Discard the custom mocks and use sqlmock for unit tests
- Do proper error propagation on unexpected errors when
reconciling with the DB
Closes #5957
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Signed-off-by: Niccolò Fei
Co-authored-by: Armando Ruocco
Co-authored-by: Niccolò Fei
---
.../management/controller/roles/contract.go | 23 -
.../management/controller/roles/postgres.go | 83 +-
.../controller/roles/postgres_test.go | 83 +-
.../management/controller/roles/reconciler.go | 3 +-
.../controller/roles/reconciler_test.go | 2 +-
.../management/controller/roles/runnable.go | 101 ++-
.../controller/roles/runnable_test.go | 777 +++++++-----------
.../management/controller/roles/suite_test.go | 23 +
8 files changed, 414 insertions(+), 681 deletions(-)
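
The net effect on callers, sketched below with signatures taken from the diffs that follow (only the acquisition of the superuser DB handle is illustrative): the Postgres-facing role functions become package-level and take an explicit *sql.DB, so tests can hand them a sqlmock connection directly.

```go
package example

import (
	"context"
	"database/sql"

	"github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles"
)

// listRoles shows the new call shape: no PostgresRoleManager construction,
// just a plain function over an explicit database handle.
func listRoles(ctx context.Context, superUserDB *sql.DB) ([]roles.DatabaseRole, error) {
	// Before: roles.NewPostgresRoleManager(superUserDB).List(ctx)
	return roles.List(ctx, superUserDB)
}
```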
diff --git a/internal/management/controller/roles/contract.go b/internal/management/controller/roles/contract.go
index 294023c4e5..d72cf3e675 100644
--- a/internal/management/controller/roles/contract.go
+++ b/internal/management/controller/roles/contract.go
@@ -17,7 +17,6 @@ limitations under the License.
package roles
import (
- "context"
"database/sql"
"reflect"
"sort"
@@ -121,25 +120,3 @@ func (d *DatabaseRole) isEquivalentTo(inSpec apiv1.RoleConfiguration) bool {
return reflect.DeepEqual(role, spec) && d.hasSameValidUntilAs(inSpec)
}
-
-// RoleManager abstracts the functionality of reconciling with PostgreSQL roles
-type RoleManager interface {
- // List the roles in the database
- List(ctx context.Context) ([]DatabaseRole, error)
- // Update the role in the database
- Update(ctx context.Context, role DatabaseRole) error
- // Create the role in the database
- Create(ctx context.Context, role DatabaseRole) error
- // Delete the role in the database
- Delete(ctx context.Context, role DatabaseRole) error
- // GetLastTransactionID returns the last TransactionID as the `xmin`
- // from the database
- // See https://www.postgresql.org/docs/current/datatype-oid.html for reference
- GetLastTransactionID(ctx context.Context, role DatabaseRole) (int64, error)
- // UpdateComment Update the comment of role in the database
- UpdateComment(ctx context.Context, role DatabaseRole) error
- // UpdateMembership Update the In Role membership of role in the database
- UpdateMembership(ctx context.Context, role DatabaseRole, rolesToGrant []string, rolesToRevoke []string) error
- // GetParentRoles returns the roles the given role is a member of
- GetParentRoles(ctx context.Context, role DatabaseRole) ([]string, error)
-}
diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go
index 7ef2ea2ff0..b6d7b1bf13 100644
--- a/internal/management/controller/roles/postgres.go
+++ b/internal/management/controller/roles/postgres.go
@@ -19,6 +19,7 @@ package roles
import (
"context"
"database/sql"
+ "errors"
"fmt"
"strings"
@@ -28,26 +29,14 @@ import (
"github.com/lib/pq"
)
-// PostgresRoleManager is a RoleManager for a database instance
-type PostgresRoleManager struct {
- superUserDB *sql.DB
-}
-
-// NewPostgresRoleManager returns an implementation of RoleManager for postgres
-func NewPostgresRoleManager(superDB *sql.DB) RoleManager {
- return PostgresRoleManager{
- superUserDB: superDB,
- }
-}
-
// List the available roles excluding all the roles that start with `pg_`
-func (sm PostgresRoleManager) List(
- ctx context.Context,
-) ([]DatabaseRole, error) {
+func List(ctx context.Context, db *sql.DB) ([]DatabaseRole, error) {
logger := log.FromContext(ctx).WithName("roles_reconciler")
- wrapErr := func(err error) error { return fmt.Errorf("while listing DB roles for DRM: %w", err) }
+ wrapErr := func(err error) error {
+ return fmt.Errorf("while listing DB roles for role reconciler: %w", err)
+ }
- rows, err := sm.superUserDB.QueryContext(
+ rows, err := db.QueryContext(
ctx,
`SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls,
@@ -109,11 +98,11 @@ func (sm PostgresRoleManager) List(
}
// Update the role
-func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) error {
+func Update(ctx context.Context, db *sql.DB, role DatabaseRole) error {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while updating role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while updating role %s with role reconciler: %w", role.Name, err)
}
var query strings.Builder
@@ -124,7 +113,7 @@ func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) err
// will change no matter what, the next reconciliation cycle we would update the password
appendPasswordOption(role, &query)
- _, err := sm.superUserDB.ExecContext(ctx, query.String())
+ _, err := db.ExecContext(ctx, query.String())
if err != nil {
return wrapErr(err)
}
@@ -133,11 +122,11 @@ func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) err
// Create the role
// TODO: do we give the role any database-level permissions?
-func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) error {
+func Create(ctx context.Context, db *sql.DB, role DatabaseRole) error {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while creating role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while creating role %s with role reconciler: %w", role.Name, err)
}
var query strings.Builder
@@ -150,7 +139,7 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err
// NOTE: defensively we might think of doing CREATE ... IF EXISTS
// but at least during development, we want to catch the error
// Even after, this may be "the kubernetes way"
- if _, err := sm.superUserDB.ExecContext(ctx, query.String()); err != nil {
+ if _, err := db.ExecContext(ctx, query.String()); err != nil {
return wrapErr(err)
}
@@ -159,7 +148,7 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err
query.WriteString(fmt.Sprintf("COMMENT ON ROLE %s IS %s",
pgx.Identifier{role.Name}.Sanitize(), pq.QuoteLiteral(role.Comment)))
- if _, err := sm.superUserDB.ExecContext(ctx, query.String()); err != nil {
+ if _, err := db.ExecContext(ctx, query.String()); err != nil {
return wrapErr(err)
}
}
@@ -168,16 +157,16 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err
}
// Delete the role
-func (sm PostgresRoleManager) Delete(ctx context.Context, role DatabaseRole) error {
+func Delete(ctx context.Context, db *sql.DB, role DatabaseRole) error {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while deleting role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while deleting role %s with role reconciler: %w", role.Name, err)
}
query := fmt.Sprintf("DROP ROLE %s", pgx.Identifier{role.Name}.Sanitize())
contextLog.Debug("Dropping", "query", query)
- _, err := sm.superUserDB.ExecContext(ctx, query)
+ _, err := db.ExecContext(ctx, query)
if err != nil {
return wrapErr(err)
}
@@ -187,18 +176,18 @@ func (sm PostgresRoleManager) Delete(ctx context.Context, role DatabaseRole) err
// GetLastTransactionID gets the last xmin for the role, to help keep track of
// whether the role has been changed in the database since the last reconciliation
-func (sm PostgresRoleManager) GetLastTransactionID(ctx context.Context, role DatabaseRole) (int64, error) {
+func GetLastTransactionID(ctx context.Context, db *sql.DB, role DatabaseRole) (int64, error) {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while getting last xmin for role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while getting last xmin for role %s with role reconciler: %w", role.Name, err)
}
var xmin int64
- err := sm.superUserDB.QueryRowContext(ctx,
+ err := db.QueryRowContext(ctx,
`SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1`,
role.Name).Scan(&xmin)
- if err == sql.ErrNoRows {
+ if errors.Is(err, sql.ErrNoRows) {
return 0, wrapErr(err)
}
if err != nil {
@@ -209,17 +198,17 @@ func (sm PostgresRoleManager) GetLastTransactionID(ctx context.Context, role Dat
}
// UpdateComment of the role
-func (sm PostgresRoleManager) UpdateComment(ctx context.Context, role DatabaseRole) error {
+func UpdateComment(ctx context.Context, db *sql.DB, role DatabaseRole) error {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while updating comment for role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while updating comment for role %s with role reconciler: %w", role.Name, err)
}
query := fmt.Sprintf("COMMENT ON ROLE %s IS %s",
pgx.Identifier{role.Name}.Sanitize(), pq.QuoteLiteral(role.Comment))
contextLog.Debug("Updating comment", "query", query)
- _, err := sm.superUserDB.ExecContext(ctx, query)
+ _, err := db.ExecContext(ctx, query)
if err != nil {
return wrapErr(err)
}
@@ -232,8 +221,9 @@ func (sm PostgresRoleManager) UpdateComment(ctx context.Context, role DatabaseRo
// IMPORTANT: the various REVOKE and GRANT commands that may be required to
// reconcile the role will be done in a single transaction. So, if any one
// of them fails, the role will not get updated
-func (sm PostgresRoleManager) UpdateMembership(
+func UpdateMembership(
ctx context.Context,
+ db *sql.DB,
role DatabaseRole,
rolesToGrant []string,
rolesToRevoke []string,
@@ -241,7 +231,7 @@ func (sm PostgresRoleManager) UpdateMembership(
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while updating memberships for role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while updating memberships for role %s with role reconciler: %w", role.Name, err)
}
if len(rolesToRevoke)+len(rolesToGrant) == 0 {
contextLog.Debug("No membership change query to execute for role")
@@ -261,20 +251,20 @@ func (sm PostgresRoleManager) UpdateMembership(
)
}
- tx, err := sm.superUserDB.BeginTx(ctx, nil)
+ tx, err := db.BeginTx(ctx, nil)
if err != nil {
return wrapErr(err)
}
defer func() {
rollbackErr := tx.Rollback()
- if rollbackErr != nil && rollbackErr != sql.ErrTxDone {
+ if rollbackErr != nil && !errors.Is(rollbackErr, sql.ErrTxDone) {
contextLog.Error(rollbackErr, "rolling back transaction")
}
}()
for _, sqlQuery := range queries {
contextLog.Debug("Executing query", "sqlQuery", sqlQuery)
- if _, err := sm.superUserDB.ExecContext(ctx, sqlQuery); err != nil {
+ if _, err := db.ExecContext(ctx, sqlQuery); err != nil {
contextLog.Error(err, "executing query", "sqlQuery", sqlQuery, "err", err)
return wrapErr(err)
}
@@ -283,14 +273,11 @@ func (sm PostgresRoleManager) UpdateMembership(
}
// GetParentRoles gets the parent roles (IN ROLE memberships) of this role
-func (sm PostgresRoleManager) GetParentRoles(
- ctx context.Context,
- role DatabaseRole,
-) ([]string, error) {
+func GetParentRoles(ctx context.Context, db *sql.DB, role DatabaseRole) ([]string, error) {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Trace("Invoked", "role", role)
wrapErr := func(err error) error {
- return fmt.Errorf("while getting parents for role %s with DRM: %w", role.Name, err)
+ return fmt.Errorf("while getting parents for role %s with role reconciler: %w", role.Name, err)
}
query := `SELECT mem.inroles
FROM pg_catalog.pg_authid as auth
@@ -301,8 +288,8 @@ func (sm PostgresRoleManager) GetParentRoles(
WHERE rolname = $1`
contextLog.Debug("get parent role", "query", query)
var parentRoles pq.StringArray
- err := sm.superUserDB.QueryRowContext(ctx, query, role.Name).Scan(&parentRoles)
- if err == sql.ErrNoRows {
+ err := db.QueryRowContext(ctx, query, role.Name).Scan(&parentRoles)
+ if errors.Is(err, sql.ErrNoRows) {
return nil, wrapErr(err)
}
if err != nil {
@@ -364,9 +351,7 @@ func appendRoleOptions(role DatabaseRole, query *strings.Builder) {
query.WriteString(fmt.Sprintf(" CONNECTION LIMIT %d", role.ConnectionLimit))
}
-func appendPasswordOption(role DatabaseRole,
- query *strings.Builder,
-) {
+func appendPasswordOption(role DatabaseRole, query *strings.Builder) {
switch {
case role.ignorePassword:
// Postgres may allow to set the VALID UNTIL of a role independently of
diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go
index 60fdbbe99f..01f3dd1dc9 100644
--- a/internal/management/controller/roles/postgres_test.go
+++ b/internal/management/controller/roles/postgres_test.go
@@ -127,37 +127,18 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
wantedRoleWithDefaultConnectionLimit.Name)
wantedRoleCommentStmt := fmt.Sprintf(
- "COMMENT ON ROLE \"%s\" IS %s",
+ wantedRoleCommentTpl,
wantedRole.Name, pq.QuoteLiteral(wantedRole.Comment))
wantedRoleExpectedAltStmt := fmt.Sprintf(
"ALTER ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 2 ",
wantedRole.Name)
unWantedRoleExpectedDelStmt := fmt.Sprintf("DROP ROLE \"%s\"", unWantedRole.Name)
- expectedSelStmt := `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
- rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls,
- pg_catalog.shobj_description(auth.oid, 'pg_authid') as comment, auth.xmin,
- mem.inroles
- FROM pg_catalog.pg_authid as auth
- LEFT JOIN (
- SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member
- FROM pg_auth_members GROUP BY member
- ) mem ON member = oid
- WHERE rolname not like 'pg\_%'`
-
- expectedMembershipStmt := `SELECT mem.inroles
- FROM pg_catalog.pg_authid as auth
- LEFT JOIN (
- SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member
- FROM pg_auth_members GROUP BY member
- ) mem ON member = oid
- WHERE rolname = $1`
// Testing List
It("List can read the list of roles from the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
testDate := time.Date(2023, 4, 4, 0, 0, 0, 0, time.UTC)
@@ -182,7 +163,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
}, false, []byte("This is streaming_replica user"), 22, []byte(`{"role1","role2"}`))
mock.ExpectQuery(expectedSelStmt).WillReturnRows(rows)
mock.ExpectExec("CREATE ROLE foo").WillReturnResult(sqlmock.NewResult(11, 1))
- roles, err := prm.List(ctx)
+ roles, err := List(ctx, db)
Expect(err).ShouldNot(HaveOccurred())
Expect(roles).To(HaveLen(3))
password1 := sql.NullString{
@@ -231,46 +212,42 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("List returns error if there is a problem with the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
dbError := errors.New("Kaboom")
mock.ExpectQuery(expectedSelStmt).WillReturnError(dbError)
- roles, err := prm.List(ctx)
+ roles, err := List(ctx, db)
Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(BeEquivalentTo("while listing DB roles for DRM: Kaboom"))
+ Expect(err.Error()).To(BeEquivalentTo("while listing DB roles for role reconciler: Kaboom"))
Expect(roles).To(BeEmpty())
})
// Testing Create
It("Create will send a correct CREATE to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleExpectedCrtStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
mock.ExpectExec(wantedRoleCommentStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
- err = prm.Create(ctx, internalWantedRole.toDatabaseRole())
+ err = Create(ctx, db, internalWantedRole.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
It("Create will return error if there is a problem creating the role in the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
dbError := errors.New("Kaboom")
mock.ExpectExec(wantedRoleExpectedCrtStmt).
WillReturnError(dbError)
- err = prm.Create(ctx, internalWantedRole.toDatabaseRole())
+ err = Create(ctx, db, internalWantedRole.toDatabaseRole())
Expect(err).To(HaveOccurred())
Expect(errors.Unwrap(err)).To(BeEquivalentTo(dbError))
})
It("Create will send a correct CREATE with password to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleWithPassExpectedCrtStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
@@ -281,13 +258,12 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
// In this unit test we are not testing the retrieval of secrets, so let's
// fetch the password content by hand
dbRole.password = sql.NullString{Valid: true, String: "myPassword"}
- err = prm.Create(ctx, dbRole)
+ err = Create(ctx, db, dbRole)
Expect(err).ShouldNot(HaveOccurred())
})
It("Create will send a correct CREATE with perpetual password to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleWithoutValidUntilExpectedCrtStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
@@ -300,32 +276,30 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
// In this unit test we are not testing the retrieval of secrets, so let's
// fetch the password content by hand
dbRole.password = sql.NullString{Valid: true, String: "myPassword"}
- err = prm.Create(ctx, dbRole)
+ err = Create(ctx, db, dbRole)
Expect(err).ShouldNot(HaveOccurred())
})
It("Create will send a correct CREATE with password deletion to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleWithPassDeletionExpectedCrtStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
mock.ExpectExec(wantedRoleCommentStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
- err = prm.Create(ctx,
+ err = Create(ctx, db,
roleConfigurationAdapter{RoleConfiguration: wantedRoleWithPassDeletion}.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
It("Create will send a correct CREATE with password deletion to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleWithDefaultConnectionLimitExpectedCrtStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
- err = prm.Create(ctx,
+ err = Create(ctx, db,
roleConfigurationAdapter{RoleConfiguration: wantedRoleWithDefaultConnectionLimit}.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
@@ -333,24 +307,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("Delete will send a correct DROP to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(unWantedRoleExpectedDelStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
- err = prm.Delete(ctx, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole())
+ err = Delete(ctx, db, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
It("Delete will return error if there is a problem deleting the role in the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
dbError := errors.New("Kaboom")
mock.ExpectExec(unWantedRoleExpectedDelStmt).
WillReturnError(dbError)
- err = prm.Delete(ctx, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole())
+ err = Delete(ctx, db, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole())
Expect(err).To(HaveOccurred())
coreErr := errors.Unwrap(err)
Expect(coreErr).To(BeEquivalentTo(dbError))
@@ -359,23 +331,21 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("Update will send a correct ALTER to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleExpectedAltStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
- err = prm.Update(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
+ err = Update(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
It("Update will return error if there is a problem updating the role in the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
dbError := errors.New("Kaboom")
mock.ExpectExec(wantedRoleExpectedAltStmt).
WillReturnError(dbError)
- err = prm.Update(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
+ err = Update(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
Expect(err).To(HaveOccurred())
Expect(errors.Is(err, dbError)).To(BeTrue())
})
@@ -384,24 +354,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("UpdateComment will send a correct COMMENT to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectExec(wantedRoleCommentStmt).
WillReturnResult(sqlmock.NewResult(2, 3))
- err = prm.UpdateComment(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
+ err = UpdateComment(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
It("UpdateComment will return error if there is a problem updating the role in the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
dbError := errors.New("Kaboom")
mock.ExpectExec(wantedRoleCommentStmt).
WillReturnError(dbError)
- err = prm.UpdateComment(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
+ err = UpdateComment(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole())
Expect(err).To(HaveOccurred())
Expect(errors.Is(err, dbError)).To(BeTrue())
})
@@ -409,7 +377,6 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("GetParentRoles will return the roles a given role belongs to", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
rows := sqlmock.NewRows([]string{
"inroles",
@@ -417,7 +384,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
AddRow([]byte(`{"role1","role2"}`))
mock.ExpectQuery(expectedMembershipStmt).WithArgs("foo").WillReturnRows(rows)
- roles, err := prm.GetParentRoles(ctx, DatabaseRole{Name: "foo"})
+ roles, err := GetParentRoles(ctx, db, DatabaseRole{Name: "foo"})
Expect(err).ShouldNot(HaveOccurred())
Expect(roles).To(HaveLen(2))
Expect(roles).To(ConsistOf("role1", "role2"))
@@ -426,10 +393,9 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("GetParentRoles will error if there is a problem querying the database", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
mock.ExpectQuery(expectedMembershipStmt).WithArgs("foo").WillReturnError(fmt.Errorf("kaboom"))
- roles, err := prm.GetParentRoles(ctx, DatabaseRole{Name: "foo"})
+ roles, err := GetParentRoles(ctx, db, DatabaseRole{Name: "foo"})
Expect(err).Should(HaveOccurred())
Expect(roles).To(BeEmpty())
})
@@ -437,7 +403,6 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("UpdateMembership will send correct GRANT and REVOKE statements to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
expectedMembershipExecs := []string{
`GRANT "pg_monitor" TO "foo"`,
@@ -454,14 +419,13 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
mock.ExpectCommit()
- err = prm.UpdateMembership(ctx, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"})
+ err = UpdateMembership(ctx, db, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"})
Expect(err).ShouldNot(HaveOccurred())
})
It("UpdateMembership will roll back if there is an error in the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
okMembership := `GRANT "pg_monitor" TO "foo"`
badMembership := `GRANT "quux" TO "foo"`
@@ -474,7 +438,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
mock.ExpectRollback()
- err = prm.UpdateMembership(ctx, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"})
+ err = UpdateMembership(ctx, db, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"})
Expect(err).Should(HaveOccurred())
})
@@ -551,23 +515,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
It("Getting the proper TransactionID per rol", func(ctx SpecContext) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- prm := NewPostgresRoleManager(db)
rows := mock.NewRows([]string{"xmin"})
lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1"
dbRole := roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()
mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(errors.New("Kaboom"))
- _, err = prm.GetLastTransactionID(ctx, dbRole)
+ _, err = GetLastTransactionID(ctx, db, dbRole)
Expect(err).To(HaveOccurred())
mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(sql.ErrNoRows)
- _, err = prm.GetLastTransactionID(ctx, dbRole)
+ _, err = GetLastTransactionID(ctx, db, dbRole)
Expect(err).To(HaveOccurred())
rows.AddRow("1321")
mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnRows(rows)
- transID, err := prm.GetLastTransactionID(ctx, dbRole)
+ transID, err := GetLastTransactionID(ctx, db, dbRole)
Expect(err).ToNot(HaveOccurred())
Expect(transID).To(BeEquivalentTo(1321))
})
diff --git a/internal/management/controller/roles/reconciler.go b/internal/management/controller/roles/reconciler.go
index 09190ab23e..af850c7f72 100644
--- a/internal/management/controller/roles/reconciler.go
+++ b/internal/management/controller/roles/reconciler.go
@@ -56,8 +56,7 @@ func Reconcile(
}
contextLogger.Debug("getting the managed roles status")
- roleManager := NewPostgresRoleManager(db)
- rolesInDB, err := roleManager.List(ctx)
+ rolesInDB, err := List(ctx, db)
if err != nil {
return reconcile.Result{}, err
}
diff --git a/internal/management/controller/roles/reconciler_test.go b/internal/management/controller/roles/reconciler_test.go
index a126b73ef4..8e49d9a692 100644
--- a/internal/management/controller/roles/reconciler_test.go
+++ b/internal/management/controller/roles/reconciler_test.go
@@ -53,7 +53,7 @@ var _ = Describe("Role reconciler test", func() {
},
},
}
- pgStringError := "while listing DB roles for DRM: " +
+ pgStringError := "while listing DB roles for role reconciler: " +
"failed to connect to `user=postgres database=postgres`: " +
"/controller/run/.s.PGSQL.5432 (/controller/run): " +
"dial error: dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory"
diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go
index 1eed8f037d..58c127da00 100644
--- a/internal/management/controller/roles/runnable.go
+++ b/internal/management/controller/roles/runnable.go
@@ -49,12 +49,21 @@ const (
roleUpdateMemberships roleAction = "UPDATE_MEMBERSHIPS"
)
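+// instanceInterface captures the subset of *postgres.Instance behavior the
+// RoleSynchronizer depends on, so tests can substitute a lightweight fake
+// (see fakeInstanceData in runnable_test.go) for a full instance.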
+type instanceInterface interface {
+ GetSuperUserDB() (*sql.DB, error)
+ IsPrimary() (bool, error)
+ RoleSynchronizerChan() <-chan *apiv1.ManagedConfiguration
+ IsServerHealthy() error
+ GetClusterName() string
+ GetNamespaceName() string
+}
+
// A RoleSynchronizer is a Kubernetes manager.Runnable
// that makes sure the Roles in the PostgreSQL databases are in sync with the spec
//
// c.f. https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#Runnable
type RoleSynchronizer struct {
- instance *postgres.Instance
+ instance instanceInterface
client client.Client
}
@@ -130,12 +139,6 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed
return nil
}
- superUserDB, err := sr.instance.GetSuperUserDB()
- if err != nil {
- return fmt.Errorf("while reconciling managed roles: %w", err)
- }
- roleManager := NewPostgresRoleManager(superUserDB)
-
var remoteCluster apiv1.Cluster
if err = sr.client.Get(ctx, types.NamespacedName{
Name: sr.instance.GetClusterName(),
@@ -148,7 +151,11 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed
if rolePasswords == nil {
rolePasswords = map[string]apiv1.PasswordState{}
}
- appliedState, irreconcilableRoles, err := sr.synchronizeRoles(ctx, roleManager, config, rolePasswords)
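+ // The superuser connection is now acquired through the narrowed
+ // instanceInterface, right before it is needed.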
+ superUserDB, err := sr.instance.GetSuperUserDB()
+ if err != nil {
+ return fmt.Errorf("while getting superuser connection: %w", err)
+ }
+ appliedState, irreconcilableRoles, err := sr.synchronizeRoles(ctx, superUserDB, config, rolePasswords)
if err != nil {
return fmt.Errorf("while syncrhonizing managed roles: %w", err)
}
@@ -174,9 +181,13 @@ func getRoleNames(roles []roleConfigurationAdapter) []string {
}
// synchronizeRoles aligns roles in the database to the spec
+// It returns
+// - the PasswordState for any updated roles
+// - any roles that had expectable postgres errors
+// - any unexpected error
func (sr *RoleSynchronizer) synchronizeRoles(
ctx context.Context,
- roleManager RoleManager,
+ db *sql.DB,
config *apiv1.ManagedConfiguration,
storedPasswordState map[string]apiv1.PasswordState,
) (map[string]apiv1.PasswordState, map[string][]string, error) {
@@ -185,22 +196,18 @@ func (sr *RoleSynchronizer) synchronizeRoles(
if err != nil {
return nil, nil, err
}
- rolesInDB, err := roleManager.List(ctx)
+ rolesInDB, err := List(ctx, db)
if err != nil {
return nil, nil, err
}
rolesByAction := evaluateNextRoleActions(
ctx, config, rolesInDB, storedPasswordState, latestSecretResourceVersion)
+
+ passwordStates, irreconcilableRoles, err := sr.applyRoleActions(ctx, db, rolesByAction)
if err != nil {
- return nil, nil, fmt.Errorf("while syncrhonizing managed roles: %w", err)
+ return nil, nil, err
}
- passwordStates, irreconcilableRoles := sr.applyRoleActions(
- ctx,
- roleManager,
- rolesByAction,
- )
-
// Merge the status from database into spec. We should keep all the status
// otherwise in the next loop the user without status will be marked as need update
for role, stateInDatabase := range passwordStates {
@@ -213,31 +220,33 @@ func (sr *RoleSynchronizer) synchronizeRoles(
// It returns the apiv1.PasswordState for each role, as well as a map of roles that
// cannot be reconciled for expectable errors, e.g. dropping a role owning content
//
-// NOTE: applyRoleActions will not error out if a single role operation fails.
-// This is designed so that a role configuration that cannot be honored by PostgreSQL
-// cannot stop the reconciliation loop and prevent other roles from being applied
+// NOTE: applyRoleActions will carry on after an expectable error, i.e. an error
+// caused by a request that PostgreSQL cannot honor, so that other actions will
+// not be blocked by a user error.
+// It will, however, error out on unexpected errors.
func (sr *RoleSynchronizer) applyRoleActions(
ctx context.Context,
- roleManager RoleManager,
+ db *sql.DB,
rolesByAction rolesByAction,
-) (map[string]apiv1.PasswordState, map[string][]string) {
+) (map[string]apiv1.PasswordState, map[string][]string, error) {
contextLog := log.FromContext(ctx).WithName("roles_reconciler")
contextLog.Debug("applying role actions")
irreconcilableRoles := make(map[string][]string)
appliedChanges := make(map[string]apiv1.PasswordState)
- handleRoleError := func(errToEvaluate error, roleName string, action roleAction) {
+ handleRoleError := func(errToEvaluate error, roleName string, action roleAction) error {
// log unexpected errors, collect expectable PostgreSQL errors
if errToEvaluate == nil {
- return
+ return nil
}
roleError, err := parseRoleError(errToEvaluate, roleName, action)
if err != nil {
contextLog.Error(err, "while performing "+string(action), "role", roleName)
- return
+ return err
}
irreconcilableRoles[roleName] = append(irreconcilableRoles[roleName], roleError.Error())
+ return nil
}
for action, roles := range rolesByAction {
@@ -251,44 +260,48 @@ func (sr *RoleSynchronizer) applyRoleActions(
"roles", getRoleNames(roles), "action", action)
for _, role := range roles {
+ var (
+ err error
+ appliedState apiv1.PasswordState
+ grants, revokes []string
+ )
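+ // Declared outside the switch so the shared handleRoleError call below
+ // can evaluate the outcome of whichever action ran.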
switch action {
case roleCreate, roleUpdate:
- appliedState, err := sr.applyRoleCreateUpdate(ctx, roleManager, role, action)
+ appliedState, err = sr.applyRoleCreateUpdate(ctx, db, role, action)
if err == nil {
appliedChanges[role.Name] = appliedState
}
- handleRoleError(err, role.Name, action)
case roleDelete:
- err := roleManager.Delete(ctx, role.toDatabaseRole())
- handleRoleError(err, role.Name, action)
+ err = Delete(ctx, db, role.toDatabaseRole())
case roleSetComment:
// NOTE: adding/updating a comment on a role does not alter its TransactionID
- err := roleManager.UpdateComment(ctx, role.toDatabaseRole())
- handleRoleError(err, role.Name, action)
+ err = UpdateComment(ctx, db, role.toDatabaseRole())
case roleUpdateMemberships:
// NOTE: revoking / granting to a role does not alter its TransactionID
dbRole := role.toDatabaseRole()
- grants, revokes, err := getRoleMembershipDiff(ctx, roleManager, role, dbRole)
- if err != nil {
- contextLog.Error(err, "while performing "+string(action), "role", role.Name)
- continue
+ grants, revokes, err = getRoleMembershipDiff(ctx, db, role, dbRole)
+ if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil {
+ return nil, nil, unhandledErr
}
- err = roleManager.UpdateMembership(ctx, dbRole, grants, revokes)
- handleRoleError(err, role.Name, action)
+
+ err = UpdateMembership(ctx, db, dbRole, grants, revokes)
+ }
+ if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil {
+ return nil, nil, unhandledErr
}
}
}
- return appliedChanges, irreconcilableRoles
+ return appliedChanges, irreconcilableRoles, nil
}
func getRoleMembershipDiff(
ctx context.Context,
- roleManager RoleManager,
+ db *sql.DB,
role roleConfigurationAdapter,
dbRole DatabaseRole,
) ([]string, []string, error) {
- inRoleInDB, err := roleManager.GetParentRoles(ctx, dbRole)
+ inRoleInDB, err := GetParentRoles(ctx, db, dbRole)
if err != nil {
return nil, nil, err
}
@@ -302,7 +315,7 @@ func getRoleMembershipDiff(
// Returns the PasswordState, as well as any error encountered
func (sr *RoleSynchronizer) applyRoleCreateUpdate(
ctx context.Context,
- roleManager RoleManager,
+ db *sql.DB,
role roleConfigurationAdapter,
action roleAction,
) (apiv1.PasswordState, error) {
@@ -332,15 +345,15 @@ func (sr *RoleSynchronizer) applyRoleCreateUpdate(
var err error
switch action {
case roleCreate:
- err = roleManager.Create(ctx, databaseRole)
+ err = Create(ctx, db, databaseRole)
case roleUpdate:
- err = roleManager.Update(ctx, databaseRole)
+ err = Update(ctx, db, databaseRole)
}
if err != nil {
return apiv1.PasswordState{}, err
}
- transactionID, err := roleManager.GetLastTransactionID(ctx, databaseRole)
+ transactionID, err := GetLastTransactionID(ctx, db, databaseRole)
if err != nil {
return apiv1.PasswordState{}, err
}
diff --git a/internal/management/controller/roles/runnable_test.go b/internal/management/controller/roles/runnable_test.go
index 4ba41763c0..370ac6bab2 100644
--- a/internal/management/controller/roles/runnable_test.go
+++ b/internal/management/controller/roles/runnable_test.go
@@ -18,11 +18,17 @@ package roles
import (
"context"
+ "database/sql"
"fmt"
+ "time"
+ "github.com/DATA-DOG/go-sqlmock"
"github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+ "github.com/lib/pq"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -33,241 +39,87 @@ import (
. "github.com/onsi/gomega"
)
-type funcCall struct{ verb, roleName string }
-
-type mockRoleManager struct {
- roles map[string]DatabaseRole
- callHistory []funcCall
-}
-
-func (m *mockRoleManager) List(_ context.Context) ([]DatabaseRole, error) {
- m.callHistory = append(m.callHistory, funcCall{"list", ""})
- re := make([]DatabaseRole, len(m.roles))
- i := 0
- for _, r := range m.roles {
- re[i] = r
- i++
- }
- return re, nil
-}
-
-func (m *mockRoleManager) Update(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"update", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("tring to update unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
-}
-
-func (m *mockRoleManager) UpdateComment(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"updateComment", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("tring to update comment of unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
-}
-
-func (m *mockRoleManager) Create(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"create", role.Name})
- _, found := m.roles[role.Name]
- if found {
- return fmt.Errorf("tring to create existing role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
-}
-
-func (m *mockRoleManager) Delete(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"delete", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("tring to delete unknown role: %s", role.Name)
- }
- delete(m.roles, role.Name)
- return nil
-}
-
-func (m *mockRoleManager) GetLastTransactionID(_ context.Context, _ DatabaseRole) (int64, error) {
- return 0, nil
-}
-
-func (m *mockRoleManager) UpdateMembership(
- _ context.Context,
- role DatabaseRole,
- _ []string,
- _ []string,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"updateMembership", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("trying to update Role Members of unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
-}
-
-func (m *mockRoleManager) GetParentRoles(_ context.Context, role DatabaseRole) ([]string, error) {
- m.callHistory = append(m.callHistory, funcCall{"getParentRoles", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return nil, fmt.Errorf("trying to get parent of unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil, nil
-}
-
-// mock.ExpectExec(unWantedRoleExpectedDelStmt).
-// WillReturnError(&pgconn.PgError{Code: "2BP01"})
-
-type mockRoleManagerWithError struct {
- roles map[string]DatabaseRole
- callHistory []funcCall
-}
-
-func (m *mockRoleManagerWithError) List(_ context.Context) ([]DatabaseRole, error) {
- m.callHistory = append(m.callHistory, funcCall{"list", ""})
- re := make([]DatabaseRole, len(m.roles))
- i := 0
- for _, r := range m.roles {
- re[i] = r
- i++
- }
- return re, nil
-}
-
-func (m *mockRoleManagerWithError) Update(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"update", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("tring to update unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
-}
-
-func (m *mockRoleManagerWithError) UpdateComment(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"updateComment", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("tring to update comment of unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
+type fakeInstanceData struct {
+ *postgres.Instance
+ db *sql.DB
}
-func (m *mockRoleManagerWithError) Create(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"create", role.Name})
- _, found := m.roles[role.Name]
- if found {
- return fmt.Errorf("tring to create existing role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil
+func (f *fakeInstanceData) GetSuperUserDB() (*sql.DB, error) {
+ return f.db, nil
}
-func (m *mockRoleManagerWithError) Delete(
- _ context.Context, role DatabaseRole,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"delete", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("tring to delete unknown role: %s", role.Name)
- }
- return fmt.Errorf("could not delete role 'foo': %w",
- &pgconn.PgError{
- Code: "2BP01", Detail: "owner of database edbDatabase",
- Message: `role "dante" cannot be dropped because some objects depend on it`,
+var _ = Describe("Role synchronizer tests", func() {
+ var (
+ db *sql.DB
+ mock sqlmock.Sqlmock
+ err error
+ roleSynchronizer RoleSynchronizer
+ )
+
+ BeforeEach(func() {
+ db, mock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+ DeferCleanup(func() {
+ Expect(mock.ExpectationsWereMet()).To(Succeed())
})
-}
-
-func (m *mockRoleManagerWithError) GetLastTransactionID(_ context.Context, _ DatabaseRole) (int64, error) {
- return 0, nil
-}
-
-func (m *mockRoleManagerWithError) UpdateMembership(
- _ context.Context,
- role DatabaseRole,
- _ []string,
- _ []string,
-) error {
- m.callHistory = append(m.callHistory, funcCall{"updateMembership", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return fmt.Errorf("trying to update Role Members of unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return &pgconn.PgError{Code: "42704", Message: "unknown role 'blah'"}
-}
-
-func (m *mockRoleManagerWithError) GetParentRoles(_ context.Context, role DatabaseRole) ([]string, error) {
- m.callHistory = append(m.callHistory, funcCall{"getParentRoles", role.Name})
- _, found := m.roles[role.Name]
- if !found {
- return nil, fmt.Errorf("trying to get parent of unknown role: %s", role.Name)
- }
- m.roles[role.Name] = role
- return nil, nil
-}
-var _ = Describe("Role synchronizer tests", func() {
- roleSynchronizer := RoleSynchronizer{
- instance: postgres.NewInstance().WithNamespace("myPod"),
- }
+ testDate := time.Date(2023, 4, 4, 0, 0, 0, 0, time.UTC)
+
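+ // Seed the mocked DB with a fixed role inventory: every spec begins with
+ // the synchronizer listing roles, and builds its expectations on top.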
+ rowsInMockDatabase := sqlmock.NewRows([]string{
+ "rolname", "rolsuper", "rolinherit", "rolcreaterole", "rolcreatedb",
+ "rolcanlogin", "rolreplication", "rolconnlimit", "rolpassword", "rolvaliduntil", "rolbypassrls", "comment",
+ "xmin", "inroles",
+ }).
+ AddRow("postgres", true, false, true, true, true, false, -1, []byte("12345"),
+ nil, false, []byte("This is postgres user"), 11, []byte("{}")).
+ AddRow("streaming_replica", false, false, true, true, false, true, 10, []byte("54321"),
+ pgtype.Timestamp{
+ Valid: true,
+ Time: testDate,
+ InfinityModifier: pgtype.Finite,
+ }, false, []byte("This is streaming_replica user"), 22, []byte(`{"role1","role2"}`)).
+ AddRow("role_to_ignore", true, false, true, true, true, false, -1, []byte("12345"),
+ nil, false, []byte("This is a custom role in the DB"), 11, []byte("{}")).
+ AddRow("role_to_test1", true, true, false, false, false, false, -1, []byte("12345"),
+ nil, false, []byte("This is a role to test with"), 11, []byte("{}")).
+ AddRow("role_to_test2", true, true, false, false, false, false, -1, []byte("12345"),
+ nil, false, []byte("This is a role to test with"), 11, []byte("{inrole}"))
+ mock.ExpectQuery(expectedSelStmt).WillReturnRows(rowsInMockDatabase)
+
+ roleSynchronizer = RoleSynchronizer{
+ instance: &fakeInstanceData{
+ Instance: postgres.NewInstance().WithNamespace("default"),
+ db: db,
+ },
+ }
+ })
When("role configurations are realizable", func() {
It("it will Create ensure:present roles in spec missing from DB", func(ctx context.Context) {
+ mock.ExpectExec("CREATE ROLE \"foo_bar\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT " +
+ "NOLOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 0").
+ WillReturnResult(sqlmock.NewResult(11, 1))
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
- {
- Name: "edb_test",
- Ensure: apiv1.EnsurePresent,
- },
{
Name: "foo_bar",
Ensure: apiv1.EnsurePresent,
},
},
}
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- },
- }
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ rows := mock.NewRows([]string{"xmin"}).AddRow("12")
+ lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1"
+ mock.ExpectQuery(lastTransactionQuery).WithArgs("foo_bar").WillReturnRows(rows)
+ passwordState, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf,
+ map[string]apiv1.PasswordState{})
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(
- []funcCall{
- {"list", ""},
- {"create", "edb_test"},
- {"create", "foo_bar"},
+ Expect(rolesWithErrors).To(BeEmpty())
+ Expect(passwordState).To(BeEquivalentTo(map[string]apiv1.PasswordState{
+ "foo_bar": {
+ TransactionID: 12,
+ SecretResourceVersion: "",
},
- ))
- Expect(rm.callHistory).To(ConsistOf(
- funcCall{"list", ""},
- funcCall{"create", "edb_test"},
- funcCall{"create", "foo_bar"},
- ))
+ }))
})
It("it will ignore ensure:absent roles in spec missing from DB", func(ctx context.Context) {
@@ -279,324 +131,255 @@ var _ = Describe("Role synchronizer tests", func() {
},
},
}
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- },
- }
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{})
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}))
})
- It("it will ignore DB roles that are not in spec", func(ctx context.Context) {
+ It("it will call the necessary grants to update membership", func(ctx context.Context) {
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
- Ensure: apiv1.EnsureAbsent,
- },
- },
- }
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "ignorezMoi": {
- Name: "ignorezMoi",
+ Name: "role_to_test1",
Superuser: true,
+ Inherit: ptr.To(true),
+ InRoles: []string{
+ "role1",
+ "role2",
+ },
+ Comment: "This is a role to test with",
+ ConnectionLimit: -1,
},
},
}
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ noParents := sqlmock.NewRows([]string{"inroles"}).AddRow([]byte(`{}`))
+ mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test1").WillReturnRows(noParents)
+ mock.ExpectBegin()
+ expectedMembershipExecs := []string{
+ `GRANT "role1" TO "role_to_test1"`,
+ `GRANT "role2" TO "role_to_test1"`,
+ }
+
+ for _, ex := range expectedMembershipExecs {
+ mock.ExpectExec(ex).
+ WillReturnResult(sqlmock.NewResult(2, 3))
+ }
+
+ mock.ExpectCommit()
+
+ _, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 11, // defined in the mock query to the DB above
+ },
+ })
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}))
+ Expect(rolesWithErrors).To(BeEmpty())
})
- It("it will call the updateMembership method", func(ctx context.Context) {
- trueValue := true
+ It("it will call the necessary revokes to update membership", func(ctx context.Context) {
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
- Superuser: true,
- Inherit: &trueValue,
- InRoles: []string{
- "role1",
- "role2",
- },
+ Name: "role_to_test2",
+ Superuser: true,
+ Inherit: ptr.To(true),
+ InRoles: []string{},
+ Comment: "This is a role to test with",
+ ConnectionLimit: -1,
},
},
}
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- Inherit: true,
- },
+ rows := sqlmock.NewRows([]string{
+ "inroles",
+ }).
+ AddRow([]byte(`{"foo"}`))
+ mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test2").WillReturnRows(rows)
+ mock.ExpectBegin()
+
+ mock.ExpectExec(`REVOKE "foo" FROM "role_to_test2"`).
+ WillReturnResult(sqlmock.NewResult(2, 3))
+
+ mock.ExpectCommit()
+
+ _, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{
+ "role_to_test2": {
+ TransactionID: 11, // defined in the mock query to the DB above
},
- }
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ })
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""},
- funcCall{"getParentRoles", "edb_test"},
- funcCall{"updateMembership", "edb_test"}))
+ Expect(rolesWithErrors).To(BeEmpty())
})
It("it will call the updateComment method", func(ctx context.Context) {
- trueValue := true
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
- Superuser: true,
- Inherit: &trueValue,
- Comment: "my comment",
- },
- },
- }
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- Inherit: true,
- Comment: "my tailor is rich",
+ Name: "role_to_test1",
+ Superuser: true,
+ Inherit: ptr.To(true),
+ Comment: "my comment",
+ ConnectionLimit: -1,
},
},
}
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ wantedRoleCommentStmt := fmt.Sprintf(
+ wantedRoleCommentTpl,
+ managedConf.Roles[0].Name, pq.QuoteLiteral(managedConf.Roles[0].Comment))
+ mock.ExpectExec(wantedRoleCommentStmt).WillReturnResult(sqlmock.NewResult(2, 3))
+ _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 11, // defined in the mock query to the DB above
+ },
+ })
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""},
- funcCall{"updateComment", "edb_test"}))
})
It("it will no-op if the roles are reconciled", func(ctx context.Context) {
- trueValue := true
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
- Superuser: true,
- Inherit: &trueValue,
+ Name: "role_to_test1",
+ Superuser: true,
+ Inherit: ptr.To(true),
+ Comment: "This is a role to test with",
+ ConnectionLimit: -1,
},
},
}
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- Inherit: true,
- },
+ _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 11, // defined in the mock query to the DB above
},
- }
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ })
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(
- funcCall{"list", ""}))
})
It("it will Delete ensure:absent roles that are in the DB", func(ctx context.Context) {
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
+ Name: "role_to_test1",
Ensure: apiv1.EnsureAbsent,
},
},
}
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- },
+ roleDeletionStmt := fmt.Sprintf("DROP ROLE \"%s\"", "role_to_test1")
+ mock.ExpectExec(roleDeletionStmt).WillReturnResult(sqlmock.NewResult(2, 3))
+ _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 11, // defined in the mock query to the DB above
},
- }
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ })
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(
- funcCall{"list", ""},
- funcCall{"delete", "edb_test"},
- ))
})
It("it will Update ensure:present roles that are in the DB but have different fields", func(ctx context.Context) {
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
- Ensure: apiv1.EnsurePresent,
- CreateDB: true,
- BypassRLS: true,
+ Name: "role_to_test1",
+ Superuser: false,
+ Inherit: ptr.To(false),
+ Comment: "This is a role to test with",
+ BypassRLS: true,
+ CreateRole: true,
+ Login: true,
+ ConnectionLimit: 2,
},
},
}
- rm := mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
+ alterStmt := fmt.Sprintf(
+ "ALTER ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 2 ",
+ "role_to_test1")
+ mock.ExpectExec(alterStmt).WillReturnResult(sqlmock.NewResult(2, 3))
+ rows := mock.NewRows([]string{"xmin"}).AddRow("12")
+ lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1"
+ mock.ExpectQuery(lastTransactionQuery).WithArgs("role_to_test1").WillReturnRows(rows)
+ passwordState, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf,
+ map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 11, // defined in the mock query to the DB above
},
- },
- }
- _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ })
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(
- funcCall{"list", ""},
- funcCall{"update", "edb_test"},
- ))
+ Expect(rolesWithErrors).To(BeEmpty())
+ Expect(passwordState).To(BeEquivalentTo(map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 12,
+ SecretResourceVersion: "",
+ },
+ }))
})
})
When("role configurations are unrealizable", func() {
- It("it will record that updateMembership could not succeed", func(ctx context.Context) {
- trueValue := true
+ It("it will carry on and capture postgres errors per role", func(ctx context.Context) {
managedConf := apiv1.ManagedConfiguration{
Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
+ Name: "role_to_test1",
Superuser: true,
- Inherit: &trueValue,
+ Inherit: ptr.To(true),
InRoles: []string{
"role1",
"role2",
},
+ Comment: "This is a role to test with",
+ ConnectionLimit: -1,
},
- },
- }
- rm := mockRoleManagerWithError{
- roles: map[string]DatabaseRole{
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- Inherit: true,
- },
- },
- }
- _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
- Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""},
- funcCall{"getParentRoles", "edb_test"},
- funcCall{"updateMembership", "edb_test"}))
- Expect(unrealizable).To(HaveLen(1))
- Expect(unrealizable["edb_test"]).To(HaveLen(1))
- Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo(
- "could not perform UPDATE_MEMBERSHIPS on role edb_test: unknown role 'blah'"))
- })
-
- It("it will record that Delete could not succeed", func(ctx context.Context) {
- managedConf := apiv1.ManagedConfiguration{
- Roles: []apiv1.RoleConfiguration{
{
- Name: "edb_test",
+ Name: "role_to_test2",
Ensure: apiv1.EnsureAbsent,
},
},
}
- rm := mockRoleManagerWithError{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- },
- },
+
+ noParents := sqlmock.NewRows([]string{"inroles"}).AddRow([]byte(`{}`))
+ mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test1").WillReturnRows(noParents)
+ mock.ExpectBegin()
+
+ mock.ExpectExec(`GRANT "role1" TO "role_to_test1"`).
+ WillReturnResult(sqlmock.NewResult(2, 3))
+
+ impossibleGrantError := pgconn.PgError{
+ Code: "0LP01", // 0LP01 -> invalid_grant_operation
+ Message: "unknown role 'role2'",
}
- _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
- Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(
- funcCall{"list", ""},
- funcCall{"delete", "edb_test"},
- ))
- Expect(unrealizable).To(HaveLen(1))
- Expect(unrealizable["edb_test"]).To(HaveLen(1))
- Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo(
- "could not perform DELETE on role edb_test: owner of database edbDatabase"))
- })
+ mock.ExpectExec(`GRANT "role2" TO "role_to_test1"`).
+ WillReturnError(&impossibleGrantError)
- It("it will continue the synchronization even if it finds errors", func(ctx context.Context) {
- trueValue := true
- managedConf := apiv1.ManagedConfiguration{
- Roles: []apiv1.RoleConfiguration{
- {
- Name: "edb_test",
- Ensure: apiv1.EnsureAbsent,
- },
- {
- Name: "another_test",
- Ensure: apiv1.EnsurePresent,
- Superuser: true,
- Inherit: &trueValue,
- InRoles: []string{
- "role1",
- "role2",
- },
- },
- },
+ mock.ExpectRollback()
+
+ impossibleDeleteError := pgconn.PgError{
+ Code: "2BP01", // 2BP01 -> dependent_objects_still_exist
+ Detail: "owner of database edbDatabase",
}
- rm := mockRoleManagerWithError{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "edb_test": {
- Name: "edb_test",
- Superuser: true,
- },
- "another_test": {
- Name: "another_test",
- Superuser: true,
- Inherit: true,
- },
+
+ roleDeletionStmt := fmt.Sprintf("DROP ROLE \"%s\"", "role_to_test2")
+ mock.ExpectExec(roleDeletionStmt).WillReturnError(&impossibleDeleteError)
+
+ _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{
+ "role_to_test1": {
+ TransactionID: 11, // defined in the mock query to the DB above
},
- }
- _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{})
+ })
+
Expect(err).ShouldNot(HaveOccurred())
- Expect(rm.callHistory).To(ConsistOf(
- funcCall{"list", ""},
- funcCall{"delete", "edb_test"},
- funcCall{"getParentRoles", "another_test"},
- funcCall{"updateMembership", "another_test"},
- ))
Expect(unrealizable).To(HaveLen(2))
- Expect(unrealizable["edb_test"]).To(HaveLen(1))
- Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo(
- "could not perform DELETE on role edb_test: owner of database edbDatabase"))
- Expect(unrealizable["another_test"]).To(HaveLen(1))
- Expect(unrealizable["another_test"][0]).To(BeEquivalentTo(
- "could not perform UPDATE_MEMBERSHIPS on role another_test: unknown role 'blah'"))
+ Expect(unrealizable["role_to_test1"]).To(HaveLen(1))
+ Expect(unrealizable["role_to_test1"][0]).To(BeEquivalentTo(
+ "could not perform UPDATE_MEMBERSHIPS on role role_to_test1: unknown role 'role2'"))
+ Expect(unrealizable["role_to_test2"]).To(HaveLen(1))
+ Expect(unrealizable["role_to_test2"][0]).To(BeEquivalentTo(
+ "could not perform DELETE on role role_to_test2: owner of database edbDatabase"))
})
})
})
-var _ = DescribeTable("Role status getter tests",
- func(spec *apiv1.ManagedConfiguration, db mockRoleManager, expected map[string]apiv1.RoleStatus) {
+var _ = DescribeTable("Role status tests",
+ func(spec *apiv1.ManagedConfiguration, roles []DatabaseRole, expected map[string]apiv1.RoleStatus) {
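+ // Entries now feed the simulated DB state in as a plain []DatabaseRole,
+ // with no RoleManager mock in between.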
ctx := context.TODO()
- roles, err := db.List(ctx)
- Expect(err).ToNot(HaveOccurred())
-
statusMap := evaluateNextRoleActions(ctx, spec, roles, map[string]apiv1.PasswordState{
"roleWithChangedPassInSpec": {
TransactionID: 101,
@@ -637,17 +420,15 @@ var _ = DescribeTable("Role status getter tests",
},
},
},
- mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "ensurePresent": {
- Name: "ensurePresent",
- Superuser: true,
- Inherit: true,
- },
+ []DatabaseRole{
+ {
+ Name: "postgres",
+ Superuser: true,
+ },
+ {
+ Name: "ensurePresent",
+ Superuser: true,
+ Inherit: true,
},
},
map[string]apiv1.RoleStatus{
@@ -676,20 +457,18 @@ var _ = DescribeTable("Role status getter tests",
},
},
},
- mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "unwantedInDB": {
- Name: "unwantedInDB",
- Superuser: true,
- },
- "drifted": {
- Name: "drifted",
- Superuser: false,
- },
+ []DatabaseRole{
+ {
+ Name: "postgres",
+ Superuser: true,
+ },
+ {
+ Name: "unwantedInDB",
+ Superuser: true,
+ },
+ {
+ Name: "drifted",
+ Superuser: false,
},
},
map[string]apiv1.RoleStatus{
@@ -709,21 +488,19 @@ var _ = DescribeTable("Role status getter tests",
},
},
},
- mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "edb_admin": {
- Name: "edb_admin",
- Superuser: true,
- Inherit: true,
- },
- "missingFromSpec": {
- Name: "missingFromSpec",
- Superuser: false,
- },
+ []DatabaseRole{
+ {
+ Name: "postgres",
+ Superuser: true,
+ },
+ {
+ Name: "edb_admin",
+ Superuser: true,
+ Inherit: true,
+ },
+ {
+ Name: "missingFromSpec",
+ Superuser: false,
},
},
map[string]apiv1.RoleStatus{
@@ -743,18 +520,16 @@ var _ = DescribeTable("Role status getter tests",
},
},
},
- mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "roleWithChangedPassInDB": {
- Name: "roleWithChangedPassInDB",
- Superuser: true,
- transactionID: 102,
- Inherit: true,
- },
+ []DatabaseRole{
+ {
+ Name: "postgres",
+ Superuser: true,
+ },
+ {
+ Name: "roleWithChangedPassInDB",
+ Superuser: true,
+ transactionID: 102,
+ Inherit: true,
},
},
map[string]apiv1.RoleStatus{
@@ -772,18 +547,16 @@ var _ = DescribeTable("Role status getter tests",
},
},
},
- mockRoleManager{
- roles: map[string]DatabaseRole{
- "postgres": {
- Name: "postgres",
- Superuser: true,
- },
- "roleWithChangedPassInSpec": {
- Name: "roleWithChangedPassInSpec",
- Superuser: true,
- transactionID: 101,
- Inherit: true,
- },
+ []DatabaseRole{
+ {
+ Name: "postgres",
+ Superuser: true,
+ },
+ {
+ Name: "roleWithChangedPassInSpec",
+ Superuser: true,
+ transactionID: 101,
+ Inherit: true,
},
},
map[string]apiv1.RoleStatus{
diff --git a/internal/management/controller/roles/suite_test.go b/internal/management/controller/roles/suite_test.go
index a6dd16bc1d..82061021e3 100644
--- a/internal/management/controller/roles/suite_test.go
+++ b/internal/management/controller/roles/suite_test.go
@@ -23,6 +23,29 @@ import (
. "github.com/onsi/gomega"
)
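+// SQL statements and templates shared by the sqlmock expectations of the
+// tests in this package.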
+const (
+ expectedSelStmt = `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
+ rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls,
+ pg_catalog.shobj_description(auth.oid, 'pg_authid') as comment, auth.xmin,
+ mem.inroles
+ FROM pg_catalog.pg_authid as auth
+ LEFT JOIN (
+ SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member
+ FROM pg_auth_members GROUP BY member
+ ) mem ON member = oid
+ WHERE rolname not like 'pg\_%'`
+
+ expectedMembershipStmt = `SELECT mem.inroles
+ FROM pg_catalog.pg_authid as auth
+ LEFT JOIN (
+ SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member
+ FROM pg_auth_members GROUP BY member
+ ) mem ON member = oid
+ WHERE rolname = $1`
+
+ wantedRoleCommentTpl = "COMMENT ON ROLE \"%s\" IS %s"
+)
+
func TestReconciler(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Internal Management Controller Roles Reconciler Suite")
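Taken together, this patch dissolves the RoleManager interface into
package-level functions that receive the *sql.DB explicitly. As a rough
sketch of the resulting call pattern (the syncSketch helper and its error
wrapping are illustrative assumptions; only List, UpdateComment, and their
signatures come from the patch):

```go
package roles

import (
	"context"
	"database/sql"
	"fmt"
)

// syncSketch illustrates the migrated API: instead of building a
// PostgresRoleManager around the connection, callers pass the *sql.DB
// straight into the package-level functions. (Hypothetical helper.)
func syncSketch(ctx context.Context, db *sql.DB) error {
	rolesInDB, err := List(ctx, db) // was: prm.List(ctx)
	if err != nil {
		return fmt.Errorf("listing roles: %w", err)
	}
	for _, role := range rolesInDB {
		// Every operation threads the same connection through explicitly,
		// which is what lets the tests above inject a sqlmock-backed DB
		// without any hand-rolled mock types.
		if err := UpdateComment(ctx, db, role); err != nil { // was: prm.UpdateComment(ctx, role)
			return err
		}
	}
	return nil
}
```

This is also why the hand-rolled mockRoleManager could be deleted from the
tests: sqlmock expectations against a plain *sql.DB now cover the same ground.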
From b4b47447b039d79fd4858cedb2bf60a5900ba9ab Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 7 Nov 2024 12:30:54 +0100
Subject: [PATCH 136/836] docs(kubectl-plugin): update formatting and version
(#6026)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Fix the formatting of the architecture list and update the version to
match the current one.
Signed-off-by: Marco Nenciarini
Signed-off-by: Niccolò Fei
Co-authored-by: Niccolò Fei
---
docs/src/kubectl-plugin.md | 220 +++++++++++++++++++------------------
hack/release.sh | 2 +
2 files changed, 113 insertions(+), 109 deletions(-)
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index 35c66f2494..793fd706ff 100755
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -30,52 +30,53 @@ them in your systems.
#### Debian packages
-For example, let's install the 1.22.2 release of the plugin, for an Intel based
+For example, let's install the 1.24.1 release of the plugin, for an Intel based
64 bit server. First, we download the right `.deb` file.
-``` sh
-$ wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.22.1/kubectl-cnpg_1.22.2_linux_x86_64.deb
+```sh
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.deb \
+ --output-document kube-plugin.deb
```
-Then, install from the local file using `dpkg`:
+Then, with superuser privileges, install from the local file using `dpkg`:
-``` sh
-$ dpkg -i kubectl-cnpg_1.22.2_linux_x86_64.deb
-(Reading database ... 702524 files and directories currently installed.)
-Preparing to unpack kubectl-cnpg_1.22.2_linux_x86_64.deb ...
-Unpacking cnpg (1.22.2) over (1.22.2) ...
-Setting up cnpg (1.22.2) ..
+```console
+$ sudo dpkg -i kube-plugin.deb
+Selecting previously unselected package cnpg.
+(Reading database ... 6688 files and directories currently installed.)
+Preparing to unpack kube-plugin.deb ...
+Unpacking cnpg (1.24.1) ...
+Setting up cnpg (1.24.1) ...
```
#### RPM packages
-As in the example for `.deb` packages, let's install the 1.22.2 release for an
+As in the example for `.deb` packages, let's install the 1.24.1 release for an
Intel 64 bit machine. Note the `--output` flag to provide a file name.
-``` sh
-curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.22.2/kubectl-cnpg_1.22.2_linux_x86_64.rpm \
+```sh
+curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.rpm \
--output kube-plugin.rpm
```
-Then install with `yum`, and you're ready to use:
+Then, with superuser privileges, install with `yum`, and you're ready to use it:
-``` sh
-$ yum --disablerepo=* localinstall kube-plugin.rpm
-yum --disablerepo=* localinstall kube-plugin.rpm
+```console
+$ sudo yum --disablerepo=* localinstall kube-plugin.rpm
Failed to set locale, defaulting to C.UTF-8
Dependencies resolved.
====================================================================================================
Package Architecture Version Repository Size
====================================================================================================
Installing:
- cnpg x86_64 1.22.2-1 @commandline 17 M
+ cnpg x86_64 1.24.1-1 @commandline 20 M
Transaction Summary
====================================================================================================
Install 1 Package
-Total size: 14 M
-Installed size: 43 M
+Total size: 20 M
+Installed size: 78 M
Is this ok [y/N]: y
```
@@ -126,19 +127,19 @@ CloudNativePG Plugin is currently built for the following
operating system and architectures:
* Linux
- * amd64
- * arm 5/6/7
- * arm64
- * s390x
- * ppc64le
+ * amd64
+ * arm 5/6/7
+ * arm64
+ * s390x
+ * ppc64le
* macOS
- * amd64
- * arm64
+ * amd64
+ * arm64
* Windows
- * 386
- * amd64
- * arm 5/6/7
- * arm64
+ * 386
+ * amd64
+ * arm 5/6/7
+ * arm64
### Configuring auto-completion
@@ -146,7 +147,7 @@ To configure auto-completion for the plugin, a helper shell script needs to be
installed into your current PATH. Assuming the latter contains `/usr/local/bin`,
this can be done with the following commands:
-```shell
+```sh
cat > kubectl_complete-cnpg <<EOF
```
@@ -185,7 +186,7 @@ installation namespace, namespaces to watch, and so on.
For details and available options, run:
-```shell
+```sh
kubectl cnpg install generate --help
```
@@ -206,7 +207,7 @@ The main options are:
An example of the `generate` command, which will generate a YAML manifest that
will install the operator, is as follows:
-```shell
+```sh
kubectl cnpg install generate \
-n king \
--version 1.23 \
@@ -246,11 +247,11 @@ cluster, including:
from the `Current LSN` field in the instances status as it is taken at
two different time intervals.
-```shell
+```sh
kubectl cnpg status sandbox
```
-```shell
+```output
Cluster Summary
Name: default/sandbox
System ID: 7423474350493388827
@@ -276,19 +277,19 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00
Instances status
Name Current LSN Replication role Status QoS Manager Version Node
---- ----------- ---------------- ------ --- --------------- ----
-sandbox-1 0/604DE38 Primary OK BestEffort 1.24.0 k8s-eu-worker
-sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2
-sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker
+sandbox-1 0/604DE38 Primary OK BestEffort 1.24.1 k8s-eu-worker
+sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2
+sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker
```
If you require more detailed status information, use the `--verbose` option (or
`-v` for short). The level of detail increases each time the flag is repeated:
-```shell
+```sh
kubectl cnpg status sandbox --verbose
```
-```shell
+```output
Cluster Summary
Name: default/sandbox
System ID: 7423474350493388827
@@ -332,9 +333,9 @@ sandbox-primary primary 1 1 1
Instances status
Name Current LSN Replication role Status QoS Manager Version Node
---- ----------- ---------------- ------ --- --------------- ----
-sandbox-1 0/6053720 Primary OK BestEffort 1.24.0 k8s-eu-worker
-sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2
-sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker
+sandbox-1 0/6053720 Primary OK BestEffort 1.24.1 k8s-eu-worker
+sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2
+sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker
```
With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can
@@ -347,13 +348,13 @@ The command also supports output in `yaml` and `json` format.
This command promotes a pod in the cluster to primary, so you can start
maintenance work or test a switchover situation in your cluster:
-```shell
+```sh
kubectl cnpg promote cluster-example cluster-example-2
```
Or you can use the instance number to promote:
-```shell
+```sh
kubectl cnpg promote cluster-example 2
```
@@ -365,19 +366,19 @@ a TLS authentication certificate.
To get a certificate, you need to provide a name for the secret to store
the credentials, the cluster name, and a user for this certificate
-```shell
+```sh
kubectl cnpg certificate cluster-cert --cnpg-cluster cluster-example --cnpg-user appuser
```
After the secret is created, you can retrieve it using `kubectl`:
-```shell
+```sh
kubectl get secret cluster-cert
```
And you can view its content in plain text using the following command:
-```shell
+```sh
kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]'
```
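If you only need a single field, such as the certificate itself, you can
extract it directly. The sketch below assumes the secret follows the usual
`kubernetes.io/tls` layout with `tls.crt` and `tls.key` keys:

```sh
# select one key from the secret; the dot in tls.crt must be escaped in jsonpath
kubectl get secret cluster-cert -o jsonpath='{.data.tls\.crt}' | base64 -d
```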
@@ -394,7 +395,7 @@ The `kubectl cnpg restart` command can be used in two cases:
the cluster's primary or deleting and recreating the pod if
it is a replica.
-```shell
+```sh
# this command will restart a whole cluster in a rollout fashion
kubectl cnpg restart [clusterName]
@@ -418,7 +419,7 @@ to cluster dependent objects, such as ConfigMaps containing custom monitoring qu
The following command will reload all configurations for a given cluster:
-```shell
+```sh
kubectl cnpg reload [cluster_name]
```
@@ -443,13 +444,13 @@ all the cluster in the list.
If you want to set all the PostgreSQL clusters in your Kubernetes cluster to
maintenance, you just need to run the following command:
-```shell
+```sh
kubectl cnpg maintenance set --all-namespaces
```
And you'll get the list of all the clusters to update:
-```shell
+```output
The following are the new values for the clusters
Namespace Cluster Name Maintenance reusePVC
--------- ------------ ----------- --------
@@ -501,32 +502,32 @@ default time-stamped filename is created for the zip file.
namespace as the clusters.
E.g., the default installation namespace is `cnpg-system`.
-```shell
+```sh
kubectl cnpg report operator -n <namespace>
```
results in
-```shell
+```output
Successfully written report to "report_operator_.zip" (format: "yaml")
```
With the `-f` flag set:
-```shell
+```sh
kubectl cnpg report operator -n <namespace> -f reportRedacted.zip
```
Unzipping the file will produce a time-stamped top-level folder to keep the
directory tidy:
-```shell
+```sh
unzip reportRedacted.zip
```
will result in:
-```shell
+```output
Archive: reportRedacted.zip
creating: report_operator_/
creating: report_operator_/manifests/
@@ -542,7 +543,7 @@ Archive: reportRedacted.zip
If you activated the `--logs` option, you'd see an extra subdirectory:
-```shell
+```output
Archive: report_operator_.zip
creating: report_operator_/operator-logs/
@@ -555,14 +556,14 @@ Archive: report_operator_.zip
In all cases, it will also try to get the CURRENT operator logs. If current
and previous logs are available, it will show them both.
-``` json
+```output
====== Begin of Previous Log =====
-2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
====== End of Previous Log =====
-2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
```
@@ -571,7 +572,7 @@ and `====== End …` guards, with no content inside.
You can verify that the confidential information is REDACTED by default:
-```shell
+```sh
cd report_operator_/manifests/
head cnpg-ca-secret.yaml
```
@@ -590,18 +591,18 @@ metadata:
With the `-S` (`--stopRedaction`) option activated, secrets are shown:
-```shell
+```sh
kubectl cnpg report operator -n <namespace> -f reportNonRedacted.zip -S
```
You'll get a reminder that you're about to view confidential information:
-```shell
+```output
WARNING: secret Redaction is OFF. Use it with caution
Successfully written report to "reportNonRedacted.zip" (format: "yaml")
```
-```shell
+```sh
unzip reportNonRedacted.zip
head cnpg-ca-secret.yaml
```
@@ -639,7 +640,7 @@ so the `-S` is disabled.
Usage:
-```shell
+```sh
kubectl cnpg report cluster [flags]
```
@@ -647,17 +648,17 @@ Note that, unlike the `operator` sub-command, for the `cluster` sub-command you
need to provide the cluster name, and very likely the namespace, unless the cluster
is in the default one.
-```shell
+```sh
kubectl cnpg report cluster example -f report.zip -n example_namespace
```
and then:
-```shell
+```sh
unzip report.zip
```
-```shell
+```output
Archive: report.zip
creating: report_cluster_example_/
creating: report_cluster_example_/manifests/
@@ -669,21 +670,21 @@ Archive: report.zip
Remember that you can use the `--logs` flag to add the pod and job logs to the ZIP.
-```shell
+```sh
kubectl cnpg report cluster example -n example_namespace --logs
```
will result in:
-```shell
+```output
Successfully written report to "report_cluster_example_.zip" (format: "yaml")
```
-```shell
+```sh
unzip report_cluster_.zip
```
-```shell
+```output
Archive: report_cluster_example_.zip
creating: report_cluster_example_/
creating: report_cluster_example_/manifests/
@@ -718,7 +719,7 @@ the `-h` flag:
`kubectl cnpg logs cluster -h`
The `logs` command will display logs in JSON-lines format, unless the
-`--timestamps` flag is used, in which case, a human readable timestamp will be
+`--timestamps` flag is used, in which case, a human-readable timestamp will be
prepended to each line. In this case, lines will no longer be valid JSON,
and tools such as `jq` may not work as desired.
@@ -741,7 +742,7 @@ The `--tail` flag can be used to specify how many log lines will be retrieved
from each pod in the cluster. By default, the `logs cluster` sub-command will
display all the logs from each pod in the cluster. If combined with the "follow"
flag `-f`, the number of logs specified by `--tail` will be retrieved until the
-current time, and and from then the new logs will be followed.
+current time, and from then the new logs will be followed.
NOTE: unlike other `cnpg` plugin commands, the `-f` is used to denote "follow"
rather than specify a file. This keeps with the convention of `kubectl logs`,
@@ -749,24 +750,24 @@ which takes `-f` to mean the logs should be followed.
Usage:
-```shell
+```sh
kubectl cnpg logs cluster [flags]
```
Using the `-f` option to follow:
-```shell
+```sh
kubectl cnpg logs cluster cluster-example -f
```
Using the `--tail` option to display 3 lines from each pod and the `-f` option
to follow:
-```shell
+```sh
kubectl cnpg logs cluster cluster-example -f --tail 3
```
-``` json
+```output
{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] LOG: ending log output to stderr","source":"/controller/log/postgres","logging_pod":"cluster-example-3"}
{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] HINT: Future log output will go to log destination \"csvlog\".","source":"/controller/log/postgres","logging_pod":"cluster-example-3"}
…
@@ -775,8 +776,8 @@ kubectl cnpg report cluster cluster-example -f --tail 3
With the `-o` option omitted, and with `--output` specified:
-``` sh
-kubectl cnpg logs cluster cluster-example --output my-cluster.log
+```console
+$ kubectl cnpg logs cluster cluster-example --output my-cluster.log
Successfully written logs to "my-cluster.log"
```
@@ -789,7 +790,7 @@ into a human-readable output, and attempts to sort the entries by timestamp.
It can be used in combination with `kubectl cnpg logs cluster`, as
shown in the following example:
-``` sh
+```console
$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL
@@ -802,7 +803,7 @@ Alternatively, it can be used in combination with other commands that produce
CNPG logs in JSON format, such as `stern`, or `kubectl logs`, as in the
following example:
-``` sh
+```console
$ kubectl logs cluster-example-1 | kubectl cnpg logs pretty
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL
@@ -816,7 +817,7 @@ to display logs for specific pods or loggers, or to filter logs by severity
level.
Here's an example:
-``` sh
+```console
$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --pods cluster-example-1 --loggers postgres --log-level info
2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: redirecting log output to logging collector process
2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] HINT: Future log output will appear in directory "/controller/log"...
@@ -833,7 +834,7 @@ mode. The sub-command will add a group separator line, `---`, at the end of
each sorted group. The size of the grouping can be configured via the
`--sorting-group-size` flag (default: 1000), as illustrated in the following example:
-``` sh
+```console
$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --sorting-group-size=3
2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Starting CloudNativePG Instance Manager
2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Checking for free disk space for WALs before starting PostgreSQL
@@ -867,14 +868,14 @@ detached PVCs.
Usage:
-```
+```sh
kubectl cnpg destroy [CLUSTER_NAME] [INSTANCE_ID]
```
The following example removes the `cluster-example-2` pod and the associated
PVCs:
-```
+```sh
kubectl cnpg destroy cluster-example 2
```
@@ -893,7 +894,7 @@ instance.
You can hibernate a cluster with:
-```
+```sh
kubectl cnpg hibernate on <cluster-name>
```
@@ -916,13 +917,13 @@ available status, including content from `pg_controldata`.
In case of error the operator will not be able to revert the procedure. You can
still force the operation with:
-```
+```sh
kubectl cnpg hibernate on cluster-example --force
```
A hibernated cluster can be resumed with:
-```
+```sh
kubectl cnpg hibernate off <cluster-name>
```
@@ -930,7 +931,7 @@ Once the cluster has been hibernated, it's possible to show the last
configuration and the status that PostgreSQL had after it was shut down.
That can be done with:
-```
+```sh
kubectl cnpg hibernate status <cluster-name>
```
@@ -939,7 +940,7 @@ kubectl cnpg hibernate status
`pgbench` can be run against an existing PostgreSQL cluster with the following
command:
-```
+```sh
kubectl cnpg pgbench <cluster-name> -- --time 30 --client 1 --jobs 1
```
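Everything after the `--` separator is passed verbatim to `pgbench` itself, so
the benchmark tables can be initialized the same way. A minimal sketch,
assuming a cluster named `cluster-example`:

```sh
# pgbench's own flags go after --; --initialize creates and populates its tables
kubectl cnpg pgbench cluster-example -- --initialize --scale 10
```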
@@ -950,7 +951,7 @@ details.
fio can be run on an existing storage class with the following command:
-```
+```sh
kubectl cnpg fio -n <namespace>
```
@@ -963,20 +964,20 @@ an existing Postgres cluster by creating a new `Backup` resource.
The following example requests an on-demand backup for a given cluster:
-```shell
+```sh
kubectl cnpg backup [cluster_name]
```
or, if using volume snapshots:
-```shell
+```sh
kubectl cnpg backup [cluster_name] -m volumeSnapshot
```
The created backup will be named after the request time:
-```shell
-kubectl cnpg backup cluster-example
+```console
+$ kubectl cnpg backup cluster-example
backup/cluster-example-20230121002300 created
```
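Because the plugin simply creates a `Backup` resource, you can follow its
progress with plain `kubectl`, using the name printed above:

```sh
# the backup is a regular custom resource; inspect its phase and details
kubectl get backup cluster-example-20230121002300
kubectl describe backup cluster-example-20230121002300
```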
@@ -1002,8 +1003,8 @@ it from the actual pod. This means that you will be using the `postgres` user.
As you will be connecting as the `postgres` user, in production environments this
method should be used with extreme care, by authorized personnel only.
-```shell
-kubectl cnpg psql cluster-example
+```console
+$ kubectl cnpg psql cluster-example
psql (17.0 (Debian 17.0-1.pgdg110+1))
Type "help" for help.
@@ -1014,8 +1015,9 @@ postgres=#
By default, the command will connect to the primary instance. The user can
choose to work against a replica by using the `--replica` option:
-```shell
-kubectl cnpg psql --replica cluster-example
+```console
+$ kubectl cnpg psql --replica cluster-example
+
psql (17.0 (Debian 17.0-1.pgdg110+1))
Type "help" for help.
@@ -1181,7 +1183,7 @@ to `source-cluster`.
We can run:
-``` sh
+```sh
kubectl cnpg publication create destination-cluster \
--external-cluster=source-cluster --all-tables
```
@@ -1191,7 +1193,7 @@ the SQL commands on the `destination-cluster`.
Or instead, we can run:
-``` sh
+```sh
kubectl cnpg publication create source-cluster \
--publication=app --all-tables
```
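In either case you can double-check the result from `psql`. The sketch below
assumes that arguments after `--` are forwarded to `psql`, mirroring the
pass-through convention used by other plugin sub-commands:

```sh
# \dRp lists the publications defined in the current database (pass-through assumed)
kubectl cnpg psql source-cluster -- -c '\dRp'
```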
@@ -1276,7 +1278,7 @@ As in the section on publications, we have a `source-cluster` and a
The following command:
-``` sh
+```sh
kubectl cnpg subscription create destination-cluster \
--external-cluster=source-cluster \
--publication=app --subscription=app
@@ -1350,7 +1352,7 @@ subscription, both called `app`, are already present.
The following command will synchronize the sequences involved in the
`app` subscription from the source cluster into the destination cluster.
-``` sh
+```sh
kubectl cnpg subscription sync-sequences destination-cluster \
--subscription=app
```
diff --git a/hack/release.sh b/hack/release.sh
index 4b5802d83b..0c0fc0596b 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -106,6 +106,8 @@ sed -i -e "s@release-[0-9.]*/releases/cnpg-[0-9.]*.yaml@${branch}/releases/cnpg-
-e "s@artifacts/release-[0-9.]*/@artifacts/${branch}/@g" \
docs/src/installation_upgrade.md
+sed -i -e "s@1\.[0-9]\+\.[0-9]\+@${release_version}@g" docs/src/kubectl-plugin.md
+
CONFIG_TMP_DIR=$(mktemp -d)
cp -r config/* "${CONFIG_TMP_DIR}"
(
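The new substitution is easy to sanity-check in isolation; the snippet below
simply exercises the same regular expression on a sample string:

```sh
# the pattern matches any 1.y.z version and rewrites it to the release version
echo 'releases/download/v1.22.2/kubectl-cnpg_1.22.2_linux_x86_64.rpm' \
  | sed -e 's@1\.[0-9]\+\.[0-9]\+@1.24.1@g'
# prints: releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.rpm
```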
From 3cfb17d6df27657fe3dfbbbaf56a20142a5d25ef Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Thu, 7 Nov 2024 12:32:45 +0100
Subject: [PATCH 137/836] refactor: simplify replication slots code, clarify
tests (#6003)
- eliminate the "Postgres manager" and use `sql.DB` as parameters instead.
- rewrite the unit tests with sqlmock.
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
.../controller/instance_controller.go | 7 +-
.../slots/infrastructure/contract.go | 36 ---
.../slots/infrastructure/postgresmanager.go | 54 +---
.../infrastructure/postgresmanager_test.go | 72 ++---
.../slots/infrastructure/suite_test.go | 22 --
.../slots/reconciler/replicationslot.go | 21 +-
.../slots/reconciler/replicationslot_test.go | 232 ++++++++---------
.../controller/slots/runner/runner.go | 39 +--
.../controller/slots/runner/runner_test.go | 245 ++++++++----------
9 files changed, 304 insertions(+), 424 deletions(-)
delete mode 100644 internal/management/controller/slots/infrastructure/contract.go
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index 71f207bb17..9a48a4c849 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -44,7 +44,6 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/controller"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles"
- "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/infrastructure"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/reconciler"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
@@ -238,10 +237,14 @@ func (r *InstanceReconciler) Reconcile(
r.configureSlotReplicator(cluster)
+ postgresDB, err := r.instance.ConnectionPool().Connection("postgres")
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("while getting the postgres connection: %w", err)
+ }
if result, err := reconciler.ReconcileReplicationSlots(
ctx,
r.instance.GetPodName(),
- infrastructure.NewPostgresManager(r.instance.ConnectionPool()),
+ postgresDB,
cluster,
); err != nil || !result.IsZero() {
return result, err
diff --git a/internal/management/controller/slots/infrastructure/contract.go b/internal/management/controller/slots/infrastructure/contract.go
deleted file mode 100644
index d0e3d0d992..0000000000
--- a/internal/management/controller/slots/infrastructure/contract.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package infrastructure
-
-import (
- "context"
-
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-)
-
-// Manager abstracts the operations that need to be sent to
-// the database instance for the management of Replication Slots
-type Manager interface {
- // List the available replication slots
- List(ctx context.Context, config *apiv1.ReplicationSlotsConfiguration) (ReplicationSlotList, error)
- // Update the replication slot
- Update(ctx context.Context, slot ReplicationSlot) error
- // Create the replication slot
- Create(ctx context.Context, slot ReplicationSlot) error
- // Delete the replication slot
- Delete(ctx context.Context, slot ReplicationSlot) error
-}
diff --git a/internal/management/controller/slots/infrastructure/postgresmanager.go b/internal/management/controller/slots/infrastructure/postgresmanager.go
index 74360cf783..726a33986f 100644
--- a/internal/management/controller/slots/infrastructure/postgresmanager.go
+++ b/internal/management/controller/slots/infrastructure/postgresmanager.go
@@ -18,40 +18,16 @@ package infrastructure
import (
"context"
+ "database/sql"
"strings"
"github.com/cloudnative-pg/machinery/pkg/log"
v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool"
)
-// PostgresManager is a Manager for a database instance
-type PostgresManager struct {
- pool pool.Pooler
-}
-
-// NewPostgresManager returns an implementation of Manager for postgres
-func NewPostgresManager(pool pool.Pooler) Manager {
- return PostgresManager{
- pool: pool,
- }
-}
-
-func (sm PostgresManager) String() string {
- return sm.pool.GetDsn("postgres")
-}
-
// List the available replication slots
-func (sm PostgresManager) List(
- ctx context.Context,
- config *v1.ReplicationSlotsConfiguration,
-) (ReplicationSlotList, error) {
- db, err := sm.pool.Connection("postgres")
- if err != nil {
- return ReplicationSlotList{}, err
- }
-
+func List(ctx context.Context, db *sql.DB, config *v1.ReplicationSlotsConfiguration) (ReplicationSlotList, error) {
rows, err := db.QueryContext(
ctx,
`SELECT slot_name, slot_type, active, coalesce(restart_lsn::TEXT, '') AS restart_lsn,
@@ -100,49 +76,35 @@ func (sm PostgresManager) List(
}
// Update the replication slot
-func (sm PostgresManager) Update(ctx context.Context, slot ReplicationSlot) error {
+func Update(ctx context.Context, db *sql.DB, slot ReplicationSlot) error {
contextLog := log.FromContext(ctx).WithName("updateSlot")
contextLog.Trace("Invoked", "slot", slot)
if slot.RestartLSN == "" {
return nil
}
- db, err := sm.pool.Connection("postgres")
- if err != nil {
- return err
- }
- _, err = db.ExecContext(ctx, "SELECT pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN)
+ _, err := db.ExecContext(ctx, "SELECT pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN)
return err
}
// Create the replication slot
-func (sm PostgresManager) Create(ctx context.Context, slot ReplicationSlot) error {
+func Create(ctx context.Context, db *sql.DB, slot ReplicationSlot) error {
contextLog := log.FromContext(ctx).WithName("createSlot")
contextLog.Trace("Invoked", "slot", slot)
- db, err := sm.pool.Connection("postgres")
- if err != nil {
- return err
- }
-
- _, err = db.ExecContext(ctx, "SELECT pg_create_physical_replication_slot($1, $2)",
+ _, err := db.ExecContext(ctx, "SELECT pg_create_physical_replication_slot($1, $2)",
slot.SlotName, slot.RestartLSN != "")
return err
}
// Delete the replication slot
-func (sm PostgresManager) Delete(ctx context.Context, slot ReplicationSlot) error {
+func Delete(ctx context.Context, db *sql.DB, slot ReplicationSlot) error {
contextLog := log.FromContext(ctx).WithName("dropSlot")
contextLog.Trace("Invoked", "slot", slot)
if slot.Active {
return nil
}
- db, err := sm.pool.Connection("postgres")
- if err != nil {
- return err
- }
-
- _, err = db.ExecContext(ctx, "SELECT pg_drop_replication_slot($1)", slot.SlotName)
+ _, err := db.ExecContext(ctx, "SELECT pg_drop_replication_slot($1)", slot.SlotName)
return err
}
diff --git a/internal/management/controller/slots/infrastructure/postgresmanager_test.go b/internal/management/controller/slots/infrastructure/postgresmanager_test.go
index 251832847c..5fdbf41718 100644
--- a/internal/management/controller/slots/infrastructure/postgresmanager_test.go
+++ b/internal/management/controller/slots/infrastructure/postgresmanager_test.go
@@ -17,7 +17,6 @@ limitations under the License.
package infrastructure
import (
- "context"
"database/sql"
"errors"
@@ -31,17 +30,15 @@ import (
var _ = Describe("PostgresManager", func() {
var (
- manager Manager
- mock sqlmock.Sqlmock
- db *sql.DB
- slot ReplicationSlot
+ mock sqlmock.Sqlmock
+ db *sql.DB
+ slot ReplicationSlot
)
BeforeEach(func() {
var err error
db, mock, err = sqlmock.New()
Expect(err).NotTo(HaveOccurred())
- manager = NewPostgresManager(&mockPooler{db: db})
slot = ReplicationSlot{
SlotName: "slot1",
Type: SlotTypePhysical,
@@ -55,26 +52,29 @@ var _ = Describe("PostgresManager", func() {
})
Context("Create", func() {
- It("should successfully create a replication slot", func() {
- mock.ExpectExec("SELECT pg_create_physical_replication_slot").
+ const expectedSQL = "SELECT pg_create_physical_replication_slot"
+ It("should successfully create a replication slot", func(ctx SpecContext) {
+ mock.ExpectExec(expectedSQL).
WithArgs(slot.SlotName, slot.RestartLSN != "").
WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.Create(context.Background(), slot)
+ err := Create(ctx, db, slot)
Expect(err).NotTo(HaveOccurred())
})
- It("should return error when the database execution fails", func() {
- mock.ExpectExec("SELECT pg_create_physical_replication_slot").
+ It("should return error when the database execution fails", func(ctx SpecContext) {
+ mock.ExpectExec(expectedSQL).
WithArgs(slot.SlotName, slot.RestartLSN != "").
WillReturnError(errors.New("mock error"))
- err := manager.Create(context.Background(), slot)
+ err := Create(ctx, db, slot)
Expect(err).To(HaveOccurred())
})
})
Context("List", func() {
+ const expectedSQL = "^SELECT (.+) FROM pg_replication_slots"
+
var config *v1.ReplicationSlotsConfiguration
BeforeEach(func() {
config = &v1.ReplicationSlotsConfiguration{
@@ -86,15 +86,15 @@ var _ = Describe("PostgresManager", func() {
}
})
- It("should successfully list replication slots", func() {
+ It("should successfully list replication slots", func(ctx SpecContext) {
rows := sqlmock.NewRows([]string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}).
AddRow("_cnpg_slot1", string(SlotTypePhysical), true, "lsn1", false).
AddRow("slot2", string(SlotTypePhysical), true, "lsn2", false)
- mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
+ mock.ExpectQuery(expectedSQL).
WillReturnRows(rows)
- result, err := manager.List(context.Background(), config)
+ result, err := List(ctx, db, config)
Expect(err).NotTo(HaveOccurred())
Expect(result.Items).To(HaveLen(2))
Expect(result.Has("_cnpg_slot1")).To(BeTrue())
@@ -113,65 +113,69 @@ var _ = Describe("PostgresManager", func() {
Expect(slot2.IsHA).To(BeFalse())
})
- It("should return error when database query fails", func() {
- mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
+ It("should return error when database query fails", func(ctx SpecContext) {
+ mock.ExpectQuery(expectedSQL).
WillReturnError(errors.New("mock error"))
- _, err := manager.List(context.Background(), config)
+ _, err := List(ctx, db, config)
Expect(err).To(HaveOccurred())
})
})
Context("Update", func() {
- It("should successfully update a replication slot", func() {
- mock.ExpectExec("SELECT pg_replication_slot_advance").
+ const expectedSQL = "SELECT pg_replication_slot_advance"
+
+ It("should successfully update a replication slot", func(ctx SpecContext) {
+ mock.ExpectExec(expectedSQL).
WithArgs(slot.SlotName, slot.RestartLSN).
WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.Update(context.Background(), slot)
+ err := Update(ctx, db, slot)
Expect(err).NotTo(HaveOccurred())
})
- It("should return error when the database execution fails", func() {
- mock.ExpectExec("SELECT pg_replication_slot_advance").
+ It("should return error when the database execution fails", func(ctx SpecContext) {
+ mock.ExpectExec(expectedSQL).
WithArgs(slot.SlotName, slot.RestartLSN).
WillReturnError(errors.New("mock error"))
- err := manager.Update(context.Background(), slot)
+ err := Update(ctx, db, slot)
Expect(err).To(HaveOccurred())
})
- It("should not update a replication slot when RestartLSN is empty", func() {
+ It("should not update a replication slot when RestartLSN is empty", func(ctx SpecContext) {
slot.RestartLSN = ""
- err := manager.Update(context.Background(), slot)
+ err := Update(ctx, db, slot)
Expect(err).NotTo(HaveOccurred())
})
})
Context("Delete", func() {
- It("should successfully delete a replication slot", func() {
+ const expectedSQL = "SELECT pg_drop_replication_slot"
+
+ It("should successfully delete a replication slot", func(ctx SpecContext) {
slot.Active = false
- mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot.SlotName).
+ mock.ExpectExec(expectedSQL).WithArgs(slot.SlotName).
WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.Delete(context.Background(), slot)
+ err := Delete(ctx, db, slot)
Expect(err).NotTo(HaveOccurred())
})
- It("should return error when the database execution fails", func() {
+ It("should return error when the database execution fails", func(ctx SpecContext) {
slot.Active = false
- mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot.SlotName).
+ mock.ExpectExec(expectedSQL).WithArgs(slot.SlotName).
WillReturnError(errors.New("mock error"))
- err := manager.Delete(context.Background(), slot)
+ err := Delete(ctx, db, slot)
Expect(err).To(HaveOccurred())
})
- It("should not delete an active replication slot", func() {
+ It("should not delete an active replication slot", func(ctx SpecContext) {
slot.RestartLSN = ""
- err := manager.Delete(context.Background(), slot)
+ err := Delete(ctx, db, slot)
Expect(err).NotTo(HaveOccurred())
})
})
diff --git a/internal/management/controller/slots/infrastructure/suite_test.go b/internal/management/controller/slots/infrastructure/suite_test.go
index ec8b6e54af..30bf0edf16 100644
--- a/internal/management/controller/slots/infrastructure/suite_test.go
+++ b/internal/management/controller/slots/infrastructure/suite_test.go
@@ -17,8 +17,6 @@ limitations under the License.
package infrastructure
import (
- "database/sql"
- "errors"
"testing"
. "github.com/onsi/ginkgo/v2"
@@ -29,23 +27,3 @@ func TestReconciler(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Internal Management Controller Slots Infrastructure Suite")
}
-
-// mockPooler is a mock implementation of the Pooler interface
-type mockPooler struct {
- db *sql.DB
-}
-
-func (mp *mockPooler) Connection(_ string) (*sql.DB, error) {
- if mp.db == nil {
- return nil, errors.New("connection error")
- }
- return mp.db, nil
-}
-
-func (mp *mockPooler) GetDsn(_ string) string {
- return "mocked DSN"
-}
-
-func (mp *mockPooler) ShutdownConnections() {
- // no-op in mock
-}
diff --git a/internal/management/controller/slots/reconciler/replicationslot.go b/internal/management/controller/slots/reconciler/replicationslot.go
index 7871358414..6d7382330b 100644
--- a/internal/management/controller/slots/reconciler/replicationslot.go
+++ b/internal/management/controller/slots/reconciler/replicationslot.go
@@ -18,6 +18,7 @@ package reconciler
import (
"context"
+ "database/sql"
"fmt"
"time"
@@ -32,7 +33,7 @@ import (
func ReconcileReplicationSlots(
ctx context.Context,
instanceName string,
- manager infrastructure.Manager,
+ db *sql.DB,
cluster *apiv1.Cluster,
) (reconcile.Result, error) {
if cluster.Spec.ReplicationSlots == nil ||
@@ -48,11 +49,11 @@ func ReconcileReplicationSlots(
// we also clean up the slots that fall under the user defined replication slots feature here.
// TODO: split-out user defined replication slots code
if !cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() {
- return dropReplicationSlots(ctx, manager, cluster, isPrimary)
+ return dropReplicationSlots(ctx, db, cluster, isPrimary)
}
if isPrimary {
- return reconcilePrimaryHAReplicationSlots(ctx, manager, cluster)
+ return reconcilePrimaryHAReplicationSlots(ctx, db, cluster)
}
return reconcile.Result{}, nil
@@ -61,13 +62,13 @@ func ReconcileReplicationSlots(
// reconcilePrimaryHAReplicationSlots reconciles the HA replication slots of the primary instance
func reconcilePrimaryHAReplicationSlots(
ctx context.Context,
- manager infrastructure.Manager,
+ db *sql.DB,
cluster *apiv1.Cluster,
) (reconcile.Result, error) {
contextLogger := log.FromContext(ctx)
contextLogger.Debug("Updating primary HA replication slots")
- currentSlots, err := manager.List(ctx, cluster.Spec.ReplicationSlots)
+ currentSlots, err := infrastructure.List(ctx, db, cluster.Spec.ReplicationSlots)
if err != nil {
return reconcile.Result{}, fmt.Errorf("reconciling primary replication slots: %w", err)
}
@@ -88,7 +89,7 @@ func reconcilePrimaryHAReplicationSlots(
}
// At this point, the cluster instance does not have a HA replication slot
- if err := manager.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotName}); err != nil {
+ if err := infrastructure.Create(ctx, db, infrastructure.ReplicationSlot{SlotName: slotName}); err != nil {
return reconcile.Result{}, fmt.Errorf("creating primary HA replication slots: %w", err)
}
}
@@ -115,7 +116,7 @@ func reconcilePrimaryHAReplicationSlots(
}
contextLogger.Trace("Attempt to delete replication slot",
"slot", slot)
- if err := manager.Delete(ctx, slot); err != nil {
+ if err := infrastructure.Delete(ctx, db, slot); err != nil {
return reconcile.Result{}, fmt.Errorf("failure deleting replication slot %q: %w", slot.SlotName, err)
}
}
@@ -133,7 +134,7 @@ func reconcilePrimaryHAReplicationSlots(
// we also clean up the slots that fall under the user defined replication slots feature here.
func dropReplicationSlots(
ctx context.Context,
- manager infrastructure.Manager,
+ db *sql.DB,
cluster *apiv1.Cluster,
isPrimary bool,
) (reconcile.Result, error) {
@@ -144,7 +145,7 @@ func dropReplicationSlots(
dropUserSlots := !cluster.Spec.ReplicationSlots.SynchronizeReplicas.GetEnabled()
// we fetch all replication slots
- slots, err := manager.List(ctx, cluster.Spec.ReplicationSlots)
+ slots, err := infrastructure.List(ctx, db, cluster.Spec.ReplicationSlots)
if err != nil {
return reconcile.Result{}, err
}
@@ -169,7 +170,7 @@ func dropReplicationSlots(
}
contextLogger.Trace("Attempt to delete replication slot",
"slot", slot)
- if err := manager.Delete(ctx, slot); err != nil {
+ if err := infrastructure.Delete(ctx, db, slot); err != nil {
return reconcile.Result{}, fmt.Errorf("while disabling standby HA replication slots: %w", err)
}
}
diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go
index 8e90f2d068..c124597df7 100644
--- a/internal/management/controller/slots/reconciler/replicationslot_test.go
+++ b/internal/management/controller/slots/reconciler/replicationslot_test.go
@@ -17,11 +17,12 @@ limitations under the License.
package reconciler
import (
- "context"
+ "database/sql"
+ "database/sql/driver"
"errors"
- "strings"
"time"
+ "github.com/DATA-DOG/go-sqlmock"
"k8s.io/utils/ptr"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -31,58 +32,9 @@ import (
. "github.com/onsi/gomega"
)
-type fakeSlot struct {
- name string
- active bool
- isHA bool
-}
-
-type fakeReplicationSlotManager struct {
- replicationSlots map[fakeSlot]bool
- triggerListError bool
- triggerDeleteError bool
-}
-
const slotPrefix = "_cnpg_"
-func (fk fakeReplicationSlotManager) Create(_ context.Context, slot infrastructure.ReplicationSlot) error {
- isHA := strings.HasPrefix(slot.SlotName, slotPrefix)
- fk.replicationSlots[fakeSlot{name: slot.SlotName, isHA: isHA}] = true
- return nil
-}
-
-func (fk fakeReplicationSlotManager) Delete(_ context.Context, slot infrastructure.ReplicationSlot) error {
- if fk.triggerDeleteError {
- return errors.New("triggered delete error")
- }
- delete(fk.replicationSlots, fakeSlot{name: slot.SlotName, active: slot.Active, isHA: slot.IsHA})
- return nil
-}
-
-func (fk fakeReplicationSlotManager) Update(_ context.Context, _ infrastructure.ReplicationSlot) error {
- return nil
-}
-
-func (fk fakeReplicationSlotManager) List(
- _ context.Context,
- _ *apiv1.ReplicationSlotsConfiguration,
-) (infrastructure.ReplicationSlotList, error) {
- var slotList infrastructure.ReplicationSlotList
- if fk.triggerListError {
- return slotList, errors.New("triggered list error")
- }
-
- for slot := range fk.replicationSlots {
- slotList.Items = append(slotList.Items, infrastructure.ReplicationSlot{
- SlotName: slot.name,
- RestartLSN: "",
- Type: infrastructure.SlotTypePhysical,
- Active: slot.active,
- IsHA: slot.isHA,
- })
- }
- return slotList, nil
-}
+var repSlotColumns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}
func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1.Cluster {
return apiv1.Cluster{
@@ -102,134 +54,156 @@ func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1.
}
}
+func newRepSlot(name string, active bool, restartLSN string) []driver.Value {
+ return []driver.Value{
+ slotPrefix + name, string(infrastructure.SlotTypePhysical), active, restartLSN, false,
+ }
+}
+
var _ = Describe("HA Replication Slots reconciliation in Primary", func() {
+ var (
+ db *sql.DB
+ mock sqlmock.Sqlmock
+ )
+ BeforeEach(func() {
+ var err error
+ db, mock, err = sqlmock.New()
+ Expect(err).NotTo(HaveOccurred())
+ })
+ AfterEach(func() {
+ Expect(mock.ExpectationsWereMet()).To(Succeed())
+ })
It("can create a new replication slot for a new cluster instance", func(ctx SpecContext) {
- fakeSlotManager := fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: slotPrefix + "instance1", isHA: true}: true,
- {name: slotPrefix + "instance2", isHA: true}: true,
- },
- }
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow(newRepSlot("instance1", true, "lsn1")...).
+ AddRow(newRepSlot("instance2", true, "lsn2")...)
- cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2", "instance3"}, "instance1")
+ mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
+ WillReturnRows(rows)
- Expect(fakeSlotManager.replicationSlots).To(HaveLen(2))
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeTrue())
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue())
+ mock.ExpectExec("SELECT pg_create_physical_replication_slot").
+ WithArgs(slotPrefix+"instance3", false).
+ WillReturnResult(sqlmock.NewResult(1, 1))
- _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster)
+ cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2", "instance3"}, "instance1")
+
+ _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster)
Expect(err).ShouldNot(HaveOccurred())
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeFalse())
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeTrue())
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue())
- Expect(fakeSlotManager.replicationSlots).To(HaveLen(2))
})
It("can delete an inactive HA replication slot that is not in the cluster", func(ctx SpecContext) {
- fakeSlotManager := fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: slotPrefix + "instance1", isHA: true}: true,
- {name: slotPrefix + "instance2", isHA: true}: true,
- {name: slotPrefix + "instance3", isHA: true}: true,
- },
- }
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow(newRepSlot("instance1", true, "lsn1")...).
+ AddRow(newRepSlot("instance2", true, "lsn2")...).
+ AddRow(newRepSlot("instance3", false, "lsn2")...)
- cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1")
+ mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
+ WillReturnRows(rows)
- Expect(fakeSlotManager.replicationSlots).To(HaveLen(3))
+ mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance3").
+ WillReturnResult(sqlmock.NewResult(1, 1))
- _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster)
+ cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1")
+
+ _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster)
Expect(err).ShouldNot(HaveOccurred())
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeFalse())
- Expect(fakeSlotManager.replicationSlots).To(HaveLen(1))
})
It("will not delete an active HA replication slot that is not in the cluster", func(ctx SpecContext) {
- fakeSlotManager := fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: slotPrefix + "instance1", isHA: true}: true,
- {name: slotPrefix + "instance2", isHA: true}: true,
- {name: slotPrefix + "instance3", isHA: true, active: true}: true,
- },
- }
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow(newRepSlot("instance1", true, "lsn1")...).
+ AddRow(newRepSlot("instance2", true, "lsn2")...).
+ AddRow(newRepSlot("instance3", true, "lsn2")...)
- cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1")
+ mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
+ WillReturnRows(rows)
- Expect(fakeSlotManager.replicationSlots).To(HaveLen(3))
+ cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1")
- _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster)
+ _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster)
Expect(err).ShouldNot(HaveOccurred())
- Expect(fakeSlotManager.replicationSlots[fakeSlot{name: slotPrefix + "instance3", isHA: true, active: true}]).
- To(BeTrue())
- Expect(fakeSlotManager.replicationSlots).To(HaveLen(2))
})
})
var _ = Describe("dropReplicationSlots", func() {
- It("returns error when listing slots fails", func() {
- fakeManager := &fakeReplicationSlotManager{
- replicationSlots: make(map[fakeSlot]bool),
- triggerListError: true,
- }
+ const selectPgRepSlot = "^SELECT (.+) FROM pg_replication_slots"
+
+ var (
+ db *sql.DB
+ mock sqlmock.Sqlmock
+ )
+ BeforeEach(func() {
+ var err error
+ db, mock, err = sqlmock.New()
+ Expect(err).NotTo(HaveOccurred())
+ })
+ AfterEach(func() {
+ Expect(mock.ExpectationsWereMet()).To(Succeed())
+ })
+
+ It("returns error when listing slots fails", func(ctx SpecContext) {
cluster := makeClusterWithInstanceNames([]string{}, "")
- _, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true)
+ mock.ExpectQuery(selectPgRepSlot).WillReturnError(errors.New("triggered list error"))
+
+ _, err := dropReplicationSlots(ctx, db, &cluster, true)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("triggered list error"))
})
- It("skips deletion of active HA slots and reschedules", func() {
- fakeManager := &fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: "slot1", active: true, isHA: true}: true,
- },
- }
+ It("skips deletion of active HA slots and reschedules", func(ctx SpecContext) {
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow(newRepSlot("instance1", true, "lsn1")...)
+ mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows)
+
cluster := makeClusterWithInstanceNames([]string{}, "")
- res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true)
+ res, err := dropReplicationSlots(ctx, db, &cluster, true)
Expect(err).NotTo(HaveOccurred())
Expect(res.RequeueAfter).To(Equal(time.Second))
})
- It("skips the deletion of user defined replication slots on the primary", func() {
- fakeManager := &fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: "slot1", active: true}: true,
- },
- }
+ It("skips the deletion of user defined replication slots on the primary", func(ctx SpecContext) {
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow("custom-slot", string(infrastructure.SlotTypePhysical), true, "lsn1", false)
+ mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
+ WillReturnRows(rows)
+
cluster := makeClusterWithInstanceNames([]string{}, "")
- res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true)
+ res, err := dropReplicationSlots(ctx, db, &cluster, true)
Expect(err).NotTo(HaveOccurred())
Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
Expect(res.IsZero()).To(BeTrue())
})
- It("returns error when deleting a slot fails", func() {
- fakeManager := &fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: "slot1", active: false, isHA: true}: true,
- },
- triggerDeleteError: true,
- }
+ It("returns error when deleting a slot fails", func(ctx SpecContext) {
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow(newRepSlot("instance1", false, "lsn1")...)
+ mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows)
+
+ mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance1").
+ WillReturnError(errors.New("delete error"))
+
cluster := makeClusterWithInstanceNames([]string{}, "")
- _, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true)
+ _, err := dropReplicationSlots(ctx, db, &cluster, true)
Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("triggered delete error"))
+ Expect(err.Error()).To(ContainSubstring("delete error"))
})
- It("deletes inactive slots and does not reschedule", func() {
- fakeManager := &fakeReplicationSlotManager{
- replicationSlots: map[fakeSlot]bool{
- {name: "slot1", active: false, isHA: true}: true,
- },
- }
+ It("deletes inactive slots and does not reschedule", func(ctx SpecContext) {
+ rows := sqlmock.NewRows(repSlotColumns).
+ AddRow(newRepSlot("instance1", false, "lsn1")...)
+ mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows)
+
+ mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance1").
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
cluster := makeClusterWithInstanceNames([]string{}, "")
- res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true)
+ res, err := dropReplicationSlots(ctx, db, &cluster, true)
Expect(err).NotTo(HaveOccurred())
Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
- Expect(fakeManager.replicationSlots).NotTo(HaveKey(fakeSlot{name: "slot1", active: false}))
})
})
diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go
index 221a5195e0..9b200c76af 100644
--- a/internal/management/controller/slots/runner/runner.go
+++ b/internal/management/controller/slots/runner/runner.go
@@ -18,6 +18,7 @@ package runner
import (
"context"
+ "database/sql"
"fmt"
"time"
@@ -108,10 +109,23 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl
primaryPool := sr.instance.PrimaryConnectionPool()
localPool := sr.instance.ConnectionPool()
+ primaryDB, err := primaryPool.Connection("postgres")
+ if err != nil {
+ return err
+ }
+ localDB, err := localPool.Connection("postgres")
+ if err != nil {
+ return err
+ }
+ contextLog.Trace("Invoked",
+ "primary", primaryPool.GetDsn("postgres"),
+ "local", localPool.GetDsn("postgres"),
+ "podName", sr.instance.GetPodName(),
+ "config", config)
err = synchronizeReplicationSlots(
ctx,
- infrastructure.NewPostgresManager(primaryPool),
- infrastructure.NewPostgresManager(localPool),
+ primaryDB,
+ localDB,
sr.instance.GetPodName(),
config,
)
@@ -122,25 +136,20 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl
// nolint: gocognit
func synchronizeReplicationSlots(
ctx context.Context,
- primarySlotManager infrastructure.Manager,
- localSlotManager infrastructure.Manager,
+ primaryDB *sql.DB,
+ localDB *sql.DB,
podName string,
config *apiv1.ReplicationSlotsConfiguration,
) error {
contextLog := log.FromContext(ctx).WithName("synchronizeReplicationSlots")
- contextLog.Trace("Invoked",
- "primary", primarySlotManager,
- "local", localSlotManager,
- "podName", podName,
- "config", config)
- slotsInPrimary, err := primarySlotManager.List(ctx, config)
+ slotsInPrimary, err := infrastructure.List(ctx, primaryDB, config)
if err != nil {
return fmt.Errorf("getting replication slot status from primary: %v", err)
}
contextLog.Trace("primary slot status", "slotsInPrimary", slotsInPrimary)
- slotsInLocal, err := localSlotManager.List(ctx, config)
+ slotsInLocal, err := infrastructure.List(ctx, localDB, config)
if err != nil {
return fmt.Errorf("getting replication slot status from local: %v", err)
}
@@ -167,12 +176,12 @@ func synchronizeReplicationSlots(
}
if !slotsInLocal.Has(slot.SlotName) {
- err := localSlotManager.Create(ctx, slot)
+ err := infrastructure.Create(ctx, localDB, slot)
if err != nil {
return err
}
}
- err := localSlotManager.Update(ctx, slot)
+ err := infrastructure.Update(ctx, localDB, slot)
if err != nil {
return err
}
@@ -184,14 +193,14 @@ func synchronizeReplicationSlots(
// * slots holding xmin (this can happen on a former primary, and will prevent VACUUM from
// removing tuples deleted by any later transaction.)
if !slotsInPrimary.Has(slot.SlotName) || slot.SlotName == mySlotName || slot.HoldsXmin {
- if err := localSlotManager.Delete(ctx, slot); err != nil {
+ if err := infrastructure.Delete(ctx, localDB, slot); err != nil {
return err
}
}
// when the user turns off the feature we should delete all the created replication slots that aren't from HA
if !slot.IsHA && !config.SynchronizeReplicas.GetEnabled() {
- if err := localSlotManager.Delete(ctx, slot); err != nil {
+ if err := infrastructure.Delete(ctx, localDB, slot); err != nil {
return err
}
}
diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go
index df73585c72..87ebe69350 100644
--- a/internal/management/controller/slots/runner/runner_test.go
+++ b/internal/management/controller/slots/runner/runner_test.go
@@ -17,9 +17,9 @@ limitations under the License.
package runner
import (
- "context"
- "fmt"
+ "database/sql"
+ "github.com/DATA-DOG/go-sqlmock"
"k8s.io/utils/ptr"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -29,153 +29,138 @@ import (
. "github.com/onsi/gomega"
)
-type fakeSlot struct {
- name string
- restartLSN string
- holdsXmin bool
-}
-
-type fakeSlotManager struct {
- slots map[string]fakeSlot
- slotsUpdated int
- slotsCreated int
- slotsDeleted int
-}
-
-func (sm *fakeSlotManager) List(
- _ context.Context,
- _ *apiv1.ReplicationSlotsConfiguration,
-) (infrastructure.ReplicationSlotList, error) {
- var slotList infrastructure.ReplicationSlotList
- for _, slot := range sm.slots {
- slotList.Items = append(slotList.Items, infrastructure.ReplicationSlot{
- SlotName: slot.name,
- RestartLSN: slot.restartLSN,
- Type: infrastructure.SlotTypePhysical,
- Active: false,
- HoldsXmin: slot.holdsXmin,
- })
- }
- return slotList, nil
-}
-
-func (sm *fakeSlotManager) Update(_ context.Context, slot infrastructure.ReplicationSlot) error {
- localSlot, found := sm.slots[slot.SlotName]
- if !found {
- return fmt.Errorf("while updating slot: Slot %s not found", slot.SlotName)
- }
- if localSlot.restartLSN != slot.RestartLSN {
- sm.slots[slot.SlotName] = fakeSlot{name: slot.SlotName, restartLSN: slot.RestartLSN}
- sm.slotsUpdated++
- }
- return nil
-}
-
-func (sm *fakeSlotManager) Create(_ context.Context, slot infrastructure.ReplicationSlot) error {
- if _, found := sm.slots[slot.SlotName]; found {
- return fmt.Errorf("while creating slot: Slot %s already exists", slot.SlotName)
- }
- sm.slots[slot.SlotName] = fakeSlot{name: slot.SlotName, restartLSN: slot.RestartLSN}
- sm.slotsCreated++
- return nil
-}
-
-func (sm *fakeSlotManager) Delete(_ context.Context, slot infrastructure.ReplicationSlot) error {
- if _, found := sm.slots[slot.SlotName]; !found {
- return fmt.Errorf("while deleting slot: Slot %s not found", slot.SlotName)
- }
- delete(sm.slots, slot.SlotName)
- sm.slotsDeleted++
- return nil
-}
-
var _ = Describe("Slot synchronization", Ordered, func() {
- localPodName := "cluster-2"
- localSlotName := "_cnpg_cluster_2"
- slot3 := "cluster-3"
- slot4 := "cluster-4"
-
- primary := &fakeSlotManager{
- slots: map[string]fakeSlot{
- localSlotName: {name: localSlotName, restartLSN: "0/301C4D8"},
- slot3: {name: slot3, restartLSN: "0/302C4D8"},
- slot4: {name: slot4, restartLSN: "0/303C4D8"},
- },
- }
- local := &fakeSlotManager{
- slots: map[string]fakeSlot{},
- }
- config := apiv1.ReplicationSlotsConfiguration{
- HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
- Enabled: ptr.To(true),
- SlotPrefix: "_cnpg_",
- },
- }
+ const (
+ selectPgReplicationSlots = "^SELECT (.+) FROM pg_replication_slots"
+ selectPgSlotAdvance = "SELECT pg_replication_slot_advance"
+
+ localPodName = "cluster-2"
+ localSlotName = "_cnpg_cluster_2"
+ slot3 = "cluster-3"
+ slot4 = "cluster-4"
+ lsnSlot3 = "0/302C4D8"
+ lsnSlot4 = "0/303C4D8"
+ )
+
+ var (
+ config = apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
+ Enabled: ptr.To(true),
+ SlotPrefix: "_cnpg_",
+ },
+ }
+ columns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}
+ )
+
+ var (
+ dbLocal, dbPrimary *sql.DB
+ mockLocal, mockPrimary sqlmock.Sqlmock
+ )
+
+ BeforeEach(func() {
+ var err error
+ dbLocal, mockLocal, err = sqlmock.New()
+ Expect(err).NotTo(HaveOccurred())
+ dbPrimary, mockPrimary, err = sqlmock.New()
+ Expect(err).NotTo(HaveOccurred())
+ })
+ AfterEach(func() {
+ Expect(mockLocal.ExpectationsWereMet()).To(Succeed(), "failed expectations in LOCAL")
+ Expect(mockPrimary.ExpectationsWereMet()).To(Succeed(), "failed expectations in PRIMARY")
+ })
It("can create slots in local from those on primary", func(ctx SpecContext) {
- localSlotsBefore, err := local.List(ctx, &config)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsBefore.Items).Should(BeEmpty())
-
- err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
+ // the primary contains slots
+ mockPrimary.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false).
+ AddRow(slot3, string(infrastructure.SlotTypePhysical), true, lsnSlot3, false).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false))
+
+ // but the local contains none
+ mockLocal.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns))
+
+ mockLocal.ExpectExec("SELECT pg_create_physical_replication_slot").
+ WithArgs(slot3, true).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ mockLocal.ExpectExec(selectPgSlotAdvance).
+ WithArgs(slot3, lsnSlot3).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ mockLocal.ExpectExec("SELECT pg_create_physical_replication_slot").
+ WithArgs(slot4, true).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ mockLocal.ExpectExec(selectPgSlotAdvance).
+ WithArgs(slot4, lsnSlot4).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
-
- localSlotsAfter, err := local.List(ctx, &config)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsAfter.Items).Should(HaveLen(2))
- Expect(localSlotsAfter.Has(slot3)).To(BeTrue())
- Expect(localSlotsAfter.Has(slot4)).To(BeTrue())
- Expect(local.slotsCreated).To(Equal(2))
})
It("can update slots in local when ReplayLSN in primary advanced", func(ctx SpecContext) {
- // advance slot3 in primary
newLSN := "0/308C4D8"
- err := primary.Update(ctx, infrastructure.ReplicationSlot{SlotName: slot3, RestartLSN: newLSN})
- Expect(err).ShouldNot(HaveOccurred())
- err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
+ // Simulate we advance slot3 in primary
+ mockPrimary.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false).
+ AddRow(slot3, string(infrastructure.SlotTypePhysical), true, newLSN, false).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false))
+ // But local has the old values
+ mockLocal.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(slot3, string(infrastructure.SlotTypePhysical), true, lsnSlot3, false).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false))
+
+ mockLocal.ExpectExec(selectPgSlotAdvance).
+ WithArgs(slot3, newLSN).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+ mockLocal.ExpectExec(selectPgSlotAdvance).
+ WithArgs(slot4, lsnSlot4).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
-
- localSlotsAfter, err := local.List(ctx, &config)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsAfter.Items).Should(HaveLen(2))
- Expect(localSlotsAfter.Has(slot3)).To(BeTrue())
- slot := localSlotsAfter.Get(slot3)
- Expect(slot.RestartLSN).To(Equal(newLSN))
- Expect(local.slotsUpdated).To(Equal(1))
})
- It("can drop slots in local when they are no longer in primary", func(ctx SpecContext) {
- err := primary.Delete(ctx, infrastructure.ReplicationSlot{SlotName: slot4})
- Expect(err).ShouldNot(HaveOccurred())
+ It("can drop inactive slots in local when they are no longer in primary", func(ctx SpecContext) {
+ // Simulate that the primary no longer has slot4
+ mockPrimary.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false))
+ // But local still has it
+ mockLocal.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(slot4, string(infrastructure.SlotTypePhysical), false, lsnSlot4, false))
- err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
- Expect(err).ShouldNot(HaveOccurred())
+ mockLocal.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot4).
+ WillReturnResult(sqlmock.NewResult(1, 1))
- localSlotsAfter, err := local.List(ctx, &config)
+ err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsAfter.Items).Should(HaveLen(1))
- Expect(localSlotsAfter.Has(slot3)).To(BeTrue())
- Expect(local.slotsDeleted).To(Equal(1))
})
It("can drop slots in local that hold xmin", func(ctx SpecContext) {
slotWithXmin := "_cnpg_xmin"
- err := primary.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotWithXmin})
- Expect(err).ShouldNot(HaveOccurred())
- local.slots[slotWithXmin] = fakeSlot{name: slotWithXmin, holdsXmin: true}
- localSlotsBefore, err := local.List(ctx, &config)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsBefore.Has(slotWithXmin)).To(BeTrue())
-
- err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
- Expect(err).ShouldNot(HaveOccurred())
-
- localSlotsAfter, err := local.List(ctx, &config)
+ mockPrimary.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false).
+ AddRow(slotWithXmin, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", true))
+ mockLocal.ExpectQuery(selectPgReplicationSlots).
+ WillReturnRows(sqlmock.NewRows(columns).
+ AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false).
+ AddRow(slotWithXmin, string(infrastructure.SlotTypePhysical), false, "0/301C4D8", true)) // inactive but with Xmin
+
+ mockLocal.ExpectExec(selectPgSlotAdvance).WithArgs(slotWithXmin, "0/301C4D8").
+ WillReturnResult(sqlmock.NewResult(1, 1))
+ mockLocal.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotWithXmin).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
- Expect(localSlotsAfter.Has(slotWithXmin)).To(BeFalse())
- Expect(localSlotsAfter.Items).Should(HaveLen(1))
- Expect(local.slotsDeleted).To(Equal(2))
})
})
From 7cc309fb0fee6f9f309653a2c3932dd7a3db1c86 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 8 Nov 2024 11:04:29 +0400
Subject: [PATCH 138/836] chore(deps): update spellcheck to v0.45.0 (main)
(#6039)
jonasbn/github-action-spellcheck `0.44.0` -> `0.45.0`
https://redirect.github.com/rojopolis/spellcheck-github-actions `0.44.0` -> `0.45.0`
---
.github/workflows/spellcheck.yml | 2 +-
Makefile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
index 07b87f3bdf..1fd12f3085 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
uses: actions/checkout@v4
- name: Spellcheck
- uses: rojopolis/spellcheck-github-actions@0.44.0
+ uses: rojopolis/spellcheck-github-actions@0.45.0
diff --git a/Makefile b/Makefile
index 9f06b5ae5f..82201e9083 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
GORELEASER_VERSION ?= v2.4.4
-SPELLCHECK_VERSION ?= 0.44.0
+SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.37.0
OPM_VERSION ?= v1.48.0
From 23aea138340882431df7a5f3a3e27d1e04d80769 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Tue, 12 Nov 2024 07:48:15 +0100
Subject: [PATCH 139/836] fix(test): avoid checking permissions in cloud
services (#6062)
We set the OpenShift cluster configuration to `credentialsMode: Mint`
to avoid checking cloud-service permissions when creating a new OpenShift
instance.
Closes #6061
Signed-off-by: Jonathan Gonzalez V.
---
hack/install-config.yaml.template | 1 +
1 file changed, 1 insertion(+)
diff --git a/hack/install-config.yaml.template b/hack/install-config.yaml.template
index 840388b89b..34e80e3580 100644
--- a/hack/install-config.yaml.template
+++ b/hack/install-config.yaml.template
@@ -31,3 +31,4 @@ platform:
publish: External
pullSecret: '${REDHAT_PULL}'
sshKey: ${SSH_PUBLIC_KEY}
+credentialsMode: Mint
From 7634f7e11b003d3a597c2adbe0f8bcc4b9d39c17 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Wed, 13 Nov 2024 07:46:48 +0100
Subject: [PATCH 140/836] fix: correct transaction code and resolve
 non-deterministic role apply (#6064)
- Updated transaction management in the role reconciler to use `TX` instead
of `DB` within a transaction loop; using `DB` was bypassing the expected
transaction rollback (see the sketch below)
- Fixed flaky unit tests by enforcing the SQL operation order, which
stabilizes test outcomes; this was done by making the function used to
apply roles, applyRoleActions(), deterministic
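In essence, the fix ensures that the statements run on the transaction
handle rather than on the connection pool. A minimal sketch of the
pattern (assuming a plain database/sql setup; updateMembership is a
hypothetical stand-in, not the repository's UpdateMembership):

    package sketch

    import (
        "context"
        "database/sql"
    )

    // updateMembership is a hypothetical stand-in for the reconciler code.
    // Statements issued through db.ExecContext inside a transaction run on
    // a separate pooled connection, so a later tx.Rollback() cannot undo
    // them; running them on the *sql.Tx keeps them inside the transaction.
    func updateMembership(ctx context.Context, db *sql.DB, queries []string) error {
        tx, err := db.BeginTx(ctx, nil)
        if err != nil {
            return err
        }
        // Rollback is a no-op once the transaction has been committed.
        defer func() { _ = tx.Rollback() }()

        for _, q := range queries {
            // Correct: execute on the transaction, not on the pool.
            if _, err := tx.ExecContext(ctx, q); err != nil {
                return err // the deferred Rollback undoes earlier statements
            }
        }
        return tx.Commit()
    }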
Signed-off-by: Jaime Silvela
---
.../management/controller/roles/postgres.go | 2 +-
.../management/controller/roles/runnable.go | 71 +++++++++----------
2 files changed, 36 insertions(+), 37 deletions(-)
diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go
index b6d7b1bf13..eb1dcf913c 100644
--- a/internal/management/controller/roles/postgres.go
+++ b/internal/management/controller/roles/postgres.go
@@ -264,7 +264,7 @@ func UpdateMembership(
for _, sqlQuery := range queries {
contextLog.Debug("Executing query", "sqlQuery", sqlQuery)
- if _, err := db.ExecContext(ctx, sqlQuery); err != nil {
+ if _, err := tx.ExecContext(ctx, sqlQuery); err != nil {
contextLog.Error(err, "executing query", "sqlQuery", sqlQuery, "err", err)
return wrapErr(err)
}
diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go
index 58c127da00..1d97e6bfaf 100644
--- a/internal/management/controller/roles/runnable.go
+++ b/internal/management/controller/roles/runnable.go
@@ -249,42 +249,12 @@ func (sr *RoleSynchronizer) applyRoleActions(
return nil
}
- for action, roles := range rolesByAction {
- switch action {
- case roleIgnore, roleIsReconciled, roleIsReserved:
- contextLog.Debug("no action required", "action", action)
- continue
- }
-
- contextLog.Info("roles in DB out of sync with Spec, evaluating action",
- "roles", getRoleNames(roles), "action", action)
-
- for _, role := range roles {
- var (
- err error
- appliedState apiv1.PasswordState
- grants, revokes []string
- )
- switch action {
- case roleCreate, roleUpdate:
- appliedState, err = sr.applyRoleCreateUpdate(ctx, db, role, action)
- if err == nil {
- appliedChanges[role.Name] = appliedState
- }
- case roleDelete:
- err = Delete(ctx, db, role.toDatabaseRole())
- case roleSetComment:
- // NOTE: adding/updating a comment on a role does not alter its TransactionID
- err = UpdateComment(ctx, db, role.toDatabaseRole())
- case roleUpdateMemberships:
- // NOTE: revoking / granting to a role does not alter its TransactionID
- dbRole := role.toDatabaseRole()
- grants, revokes, err = getRoleMembershipDiff(ctx, db, role, dbRole)
- if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil {
- return nil, nil, unhandledErr
- }
-
- err = UpdateMembership(ctx, db, dbRole, grants, revokes)
+ actionsCreateUpdate := []roleAction{roleCreate, roleUpdate}
+ for _, action := range actionsCreateUpdate {
+ for _, role := range rolesByAction[action] {
+ appliedState, err := sr.applyRoleCreateUpdate(ctx, db, role, action)
+ if err == nil {
+ appliedChanges[role.Name] = appliedState
}
if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil {
return nil, nil, unhandledErr
@@ -292,6 +262,35 @@ func (sr *RoleSynchronizer) applyRoleActions(
}
}
+ for _, role := range rolesByAction[roleSetComment] {
+ // NOTE: adding/updating a comment on a role does not alter its TransactionID
+ err := UpdateComment(ctx, db, role.toDatabaseRole())
+ if unhandledErr := handleRoleError(err, role.Name, roleSetComment); unhandledErr != nil {
+ return nil, nil, unhandledErr
+ }
+ }
+
+ for _, role := range rolesByAction[roleUpdateMemberships] {
+ // NOTE: revoking / granting to a role does not alter its TransactionID
+ dbRole := role.toDatabaseRole()
+ grants, revokes, err := getRoleMembershipDiff(ctx, db, role, dbRole)
+ if unhandledErr := handleRoleError(err, role.Name, roleUpdateMemberships); unhandledErr != nil {
+ return nil, nil, unhandledErr
+ }
+
+ err = UpdateMembership(ctx, db, dbRole, grants, revokes)
+ if unhandledErr := handleRoleError(err, role.Name, roleUpdateMemberships); unhandledErr != nil {
+ return nil, nil, unhandledErr
+ }
+ }
+
+ for _, role := range rolesByAction[roleDelete] {
+ err := Delete(ctx, db, role.toDatabaseRole())
+ if unhandledErr := handleRoleError(err, role.Name, roleDelete); unhandledErr != nil {
+ return nil, nil, unhandledErr
+ }
+ }
+
return appliedChanges, irreconcilableRoles, nil
}
From 54b234787d3f0f5ec519008d67d87b4a457a1e7d Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 13 Nov 2024 09:34:34 +0100
Subject: [PATCH 141/836] chore(deps): update dependency go to v1.23.3 (main)
(#6050)
---
go.mod | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/go.mod b/go.mod
index cdb1248b22..2adca64b38 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/cloudnative-pg/cloudnative-pg
go 1.23
-toolchain go1.23.2
+toolchain go1.23.3
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
From ac681936011631528fa51246370b0e1f2ebaaaa6 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 13 Nov 2024 10:20:51 +0100
Subject: [PATCH 142/836] chore(deps): update dependency rook/rook to v1.15.5
(main) (#6070)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index f5bc575720..b1891561f3 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,7 +37,7 @@ env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.24.0"
- ROOK_VERSION: "v1.15.4"
+ ROOK_VERSION: "v1.15.5"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
From 7d9b9937d56ff460202bcc3d3c08d24b99b91898 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Wed, 13 Nov 2024 10:28:10 +0100
Subject: [PATCH 143/836] chore(security): improve Snyk scan (#6059)
Use snyk `code test` for static analysis, in addition to `test`.
Closes #6058
Signed-off-by: Jonathan Gonzalez V.
---
.github/workflows/snyk.yml | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml
index e41fca8302..9fdb83fe0b 100644
--- a/.github/workflows/snyk.yml
+++ b/.github/workflows/snyk.yml
@@ -16,15 +16,29 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- - name: Run Snyk to check for vulnerabilities
+ - name: Static Code Analysis
uses: snyk/actions/golang@0.4.0
continue-on-error: true
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
with:
- args: --sarif-file-output=snyk.sarif
+ command: 'code test'
+ args: --sarif-file-output=snyk-static.sarif
- name: Upload result to GitHub Code Scanning
uses: github/codeql-action/upload-sarif@v3
with:
- sarif_file: snyk.sarif
+ sarif_file: snyk-static.sarif
+
+ - name: Vulnerability scan
+ uses: snyk/actions/golang@0.4.0
+ continue-on-error: true
+ env:
+ SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
+ with:
+ args: --sarif-file-output=snyk-test.sarif
+
+ - name: Upload result to GitHub Code Scanning
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ sarif_file: snyk-test.sarif
From 43b694379bfc17841323230b8348ba031455d503 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 13 Nov 2024 11:37:59 +0100
Subject: [PATCH 144/836] chore(deps): update kindest/node docker tag to
v1.31.2 (main) (#6075)
---
hack/e2e/run-e2e-kind.sh | 2 +-
hack/setup-cluster.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index 3a977f6628..e795c4a4dc 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e"
export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false}
export BUILD_IMAGE=${BUILD_IMAGE:-false}
-KIND_NODE_DEFAULT_VERSION=v1.31.1
+KIND_NODE_DEFAULT_VERSION=v1.31.2
export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION}
export CLUSTER_ENGINE=kind
export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-}
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index a5444c392f..698baa2e3f 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then
fi
# Defaults
-KIND_NODE_DEFAULT_VERSION=v1.31.1
+KIND_NODE_DEFAULT_VERSION=v1.31.2
K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0
From 8608232c28131d5df88a80ccdec631f198947150 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 13 Nov 2024 14:45:12 +0100
Subject: [PATCH 145/836] chore(deps): update rajatjindal/krew-release-bot
action to v0.0.47 (main) (#6079)
---
.github/workflows/release-publish.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml
index 11cf5be297..d1b429c154 100644
--- a/.github/workflows/release-publish.yml
+++ b/.github/workflows/release-publish.yml
@@ -165,7 +165,7 @@ jobs:
if: |
needs.check-version.outputs.is_latest == 'true' &&
needs.check-version.outputs.is_stable == 'true'
- uses: rajatjindal/krew-release-bot@v0.0.46
+ uses: rajatjindal/krew-release-bot@v0.0.47
with:
krew_template_file: dist/krew/cnpg.yaml
-
From 92f26c60f4f98a61c9b3d9032449eac234bdae6e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 14 Nov 2024 10:06:15 +0100
Subject: [PATCH 146/836] chore(deps): update dependency kubernetes-sigs/kind
to v0.25.0 (main) (#6095)
---
.github/workflows/continuous-delivery.yml | 2 +-
.github/workflows/continuous-integration.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index b1891561f3..d4d118b458 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -36,7 +36,7 @@ on:
env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
- KIND_VERSION: "v0.24.0"
+ KIND_VERSION: "v0.25.0"
ROOK_VERSION: "v1.15.5"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 212f877293..592525397c 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -19,7 +19,7 @@ env:
GOLANG_VERSION: "1.23.x"
GOLANGCI_LINT_VERSION: "v1.61.0"
KUBEBUILDER_VERSION: "2.3.1"
- KIND_VERSION: "v0.24.0"
+ KIND_VERSION: "v0.25.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
API_DOC_NAME: "cloudnative-pg.v1.md"
SLACK_USERNAME: "cnpg-bot"
From 3c2c3f695eafbf1a0db96954f1796ff37cbd0733 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 14 Nov 2024 11:49:32 +0100
Subject: [PATCH 147/836] chore(deps): update dependency vmware-tanzu/velero to
v1.15.0 (main) (#6102)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index d4d118b458..a8d8d97ecf 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1343,7 +1343,7 @@ jobs:
name: Setup Velero
uses: nick-fields/retry@v3
env:
- VELERO_VERSION: "v1.14.1"
+ VELERO_VERSION: "v1.15.0"
VELERO_AWS_PLUGIN_VERSION: "v1.10.1"
with:
timeout_minutes: 10
From 726a97358656f1ca900d0c51573461780119f1d1 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Fri, 15 Nov 2024 07:21:00 +0100
Subject: [PATCH 148/836] fix(tests): update the way we check the EKS versions
(#6101)
AWS changed the entire format and content of the EKS documentation, so
the way we detected the supported versions no longer worked. We now parse
the new format at the new path to get the versions.
Closes #6100
Signed-off-by: Jonathan Gonzalez V.
---
.github/workflows/k8s-versions-check.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml
index 4178b76391..9db538b271 100644
--- a/.github/workflows/k8s-versions-check.yml
+++ b/.github/workflows/k8s-versions-check.yml
@@ -42,8 +42,8 @@ jobs:
# There is no command to get EKS k8s versions, we have to parse the documentation
name: Get updated EKS versions
run: |
- DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/main/doc_source/kubernetes-versions.md"
- curl --silent "${DOC_URL}" | grep -E '^\+ `[0-9]\.[0-9]{2}`$' | sed -e 's/[\ +`]//g' | \
+ DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/clusters/kubernetes-versions-standard.adoc"
+ curl --silent "${DOC_URL}" | sed -e 's/.*`Kubernetes` \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \
awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \
jq -Rn '[inputs]' | tee .github/eks_versions.json
if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks'
From b3b6dbe7899d27dd65b42d4ecd89c9d0c3d780b0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 15 Nov 2024 10:48:49 +0100
Subject: [PATCH 149/836] chore(deps): update dependency
vmware-tanzu/velero-plugin-for-aws to v1.11.0 (main) (#6116)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index a8d8d97ecf..996673da01 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1344,7 +1344,7 @@ jobs:
uses: nick-fields/retry@v3
env:
VELERO_VERSION: "v1.15.0"
- VELERO_AWS_PLUGIN_VERSION: "v1.10.1"
+ VELERO_AWS_PLUGIN_VERSION: "v1.11.0"
with:
timeout_minutes: 10
max_attempts: 3
From bfc966caa27c08b34a372eb6c0f7fb0a0d31d2ef Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 16 Nov 2024 10:46:58 +0100
Subject: [PATCH 150/836] chore(deps): update all non-major go dependencies
(main) (#6115)
https://github.com/goreleaser/goreleaser `v2.4.4` -> `v2.4.5`
golang.org/x/term `v0.25.0` -> `v0.26.0`
https://github.com/grpc/grpc-go `v1.67.1` -> `v1.68.0`
golang.org/x/sys `v0.26.0` -> `v0.27.0`
google.golang.org/genproto/googleapis/rpc `v0.0.0-20240814211410-ddb44dafa142` -> `v0.0.0-20240903143218-8af14fe29dc1`
---
Makefile | 2 +-
go.mod | 8 ++++----
go.sum | 16 ++++++++--------
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/Makefile b/Makefile
index 82201e9083..b535b35bb4 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
-GORELEASER_VERSION ?= v2.4.4
+GORELEASER_VERSION ?= v2.4.5
SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.37.0
diff --git a/go.mod b/go.mod
index 2adca64b38..14918aad86 100644
--- a/go.mod
+++ b/go.mod
@@ -37,8 +37,8 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/term v0.25.0
- google.golang.org/grpc v1.67.1
+ golang.org/x/term v0.26.0
+ google.golang.org/grpc v1.68.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.2
k8s.io/apiextensions-apiserver v0.31.2
@@ -108,12 +108,12 @@ require (
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.26.0 // indirect
+ golang.org/x/sys v0.27.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 5fc91d02a5..d9cc553609 100644
--- a/go.sum
+++ b/go.sum
@@ -236,10 +236,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
+golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
@@ -258,10 +258,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
-google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
From 214af9c68707826405ee0b75ff93e279cdc83fc0 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Mon, 18 Nov 2024 10:43:24 +0100
Subject: [PATCH 151/836] chore: simplify tablespaces interfaces for testing
(#5635)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The tablespaces feature used an unnecessary interface
to mock the database. This patch replaces it with sqlmock,
improving the unit tests by focusing on the top-level
`Reconcile` instead of the internals.
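The resulting test pattern looks roughly like this (a simplified sketch
using go-sqlmock; listTablespaces is a hypothetical stand-in for the code
under test, not the repository's actual function):

    package sketch

    import (
        "context"
        "database/sql"
        "testing"

        "github.com/DATA-DOG/go-sqlmock"
    )

    // listTablespaces is a hypothetical stand-in for the code under test.
    func listTablespaces(ctx context.Context, db *sql.DB) ([]string, error) {
        rows, err := db.QueryContext(ctx, "SELECT spcname FROM pg_tablespace")
        if err != nil {
            return nil, err
        }
        defer rows.Close()
        var names []string
        for rows.Next() {
            var name string
            if err := rows.Scan(&name); err != nil {
                return nil, err
            }
            names = append(names, name)
        }
        return names, rows.Err()
    }

    func TestListTablespaces(t *testing.T) {
        // QueryMatcherEqual makes the mock match SQL text exactly, so the
        // test fails on any unexpected statement.
        db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
        if err != nil {
            t.Fatal(err)
        }
        defer db.Close()

        mock.ExpectQuery("SELECT spcname FROM pg_tablespace").
            WillReturnRows(sqlmock.NewRows([]string{"spcname"}).AddRow("foo"))

        names, err := listTablespaces(context.Background(), db)
        if err != nil || len(names) != 1 || names[0] != "foo" {
            t.Fatalf("unexpected result: %v, %v", names, err)
        }
        // Verify every expectation was consumed, in order.
        if err := mock.ExpectationsWereMet(); err != nil {
            t.Error(err)
        }
    }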
Signed-off-by: Jaime Silvela
Signed-off-by: Gabriele Quaresima
Signed-off-by: Niccolò Fei
Co-authored-by: Gabriele Quaresima
Co-authored-by: Niccolò Fei
---
.../controller/tablespaces/actions.go | 13 +-
.../controller/tablespaces/controller_test.go | 445 ++++++++++++------
.../tablespaces/infrastructure/contract.go | 14 -
.../tablespaces/infrastructure/postgres.go | 29 +-
.../infrastructure/postgres_test.go | 38 +-
.../controller/tablespaces/manager.go | 33 +-
.../controller/tablespaces/reconciler.go | 15 +-
.../controller/tablespaces/storage.go | 2 +
.../postgres/readiness/readiness.go | 13 +-
9 files changed, 367 insertions(+), 235 deletions(-)
diff --git a/internal/management/controller/tablespaces/actions.go b/internal/management/controller/tablespaces/actions.go
index d886dc4968..64fcc059d6 100644
--- a/internal/management/controller/tablespaces/actions.go
+++ b/internal/management/controller/tablespaces/actions.go
@@ -18,6 +18,7 @@ package tablespaces
import (
"context"
+ "database/sql"
"github.com/cloudnative-pg/machinery/pkg/log"
@@ -27,7 +28,7 @@ import (
type tablespaceReconcilerStep interface {
execute(ctx context.Context,
- tbsManager infrastructure.TablespaceManager,
+ db *sql.DB,
tbsStorageManager tablespaceStorageManager,
) apiv1.TablespaceState
}
@@ -38,7 +39,7 @@ type createTablespaceAction struct {
func (r *createTablespaceAction) execute(
ctx context.Context,
- tbsManager infrastructure.TablespaceManager,
+ db *sql.DB,
tbsStorageManager tablespaceStorageManager,
) apiv1.TablespaceState {
contextLog := log.FromContext(ctx).WithName("tbs_create_reconciler")
@@ -59,7 +60,7 @@ func (r *createTablespaceAction) execute(
Name: r.tablespace.Name,
Owner: r.tablespace.Owner.Name,
}
- err := tbsManager.Create(ctx, tablespace)
+ err := infrastructure.Create(ctx, db, tablespace)
if err != nil {
contextLog.Error(err, "while performing action", "tablespace", r.tablespace.Name)
return apiv1.TablespaceState{
@@ -83,7 +84,7 @@ type updateTablespaceAction struct {
func (r *updateTablespaceAction) execute(
ctx context.Context,
- tbsManager infrastructure.TablespaceManager,
+ db *sql.DB,
_ tablespaceStorageManager,
) apiv1.TablespaceState {
contextLog := log.FromContext(ctx).WithName("tbs_update_reconciler")
@@ -93,7 +94,7 @@ func (r *updateTablespaceAction) execute(
Name: r.tablespace.Name,
Owner: r.tablespace.Owner.Name,
}
- err := tbsManager.Update(ctx, tablespace)
+ err := infrastructure.Update(ctx, db, tablespace)
if err != nil {
contextLog.Error(
err, "while performing action",
@@ -119,7 +120,7 @@ type noopTablespaceAction struct {
func (r *noopTablespaceAction) execute(
_ context.Context,
- _ infrastructure.TablespaceManager,
+ _ *sql.DB,
_ tablespaceStorageManager,
) apiv1.TablespaceState {
return apiv1.TablespaceState{
diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go
index 4c5bf682ec..4bb80e5409 100644
--- a/internal/management/controller/tablespaces/controller_test.go
+++ b/internal/management/controller/tablespaces/controller_test.go
@@ -18,52 +18,27 @@ package tablespaces
import (
"context"
+ "database/sql"
+ "errors"
"fmt"
"slices"
+ "github.com/DATA-DOG/go-sqlmock"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/tablespaces/infrastructure"
+ schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
-type mockTablespaceManager struct {
- tablespaces map[string]infrastructure.Tablespace
- callHistory []string
-}
-
-func (m *mockTablespaceManager) List(_ context.Context) ([]infrastructure.Tablespace, error) {
- m.callHistory = append(m.callHistory, "list")
- re := make([]infrastructure.Tablespace, len(m.tablespaces))
- i := 0
- for _, r := range m.tablespaces {
- re[i] = r
- i++
- }
- return re, nil
-}
-
-func (m *mockTablespaceManager) Update(
- _ context.Context, _ infrastructure.Tablespace,
-) error {
- m.callHistory = append(m.callHistory, "update")
- return nil
-}
-
-func (m *mockTablespaceManager) Create(
- _ context.Context, tablespace infrastructure.Tablespace,
-) error {
- m.callHistory = append(m.callHistory, "create")
- _, found := m.tablespaces[tablespace.Name]
- if found {
- return fmt.Errorf("trying to create existing tablespace: %s", tablespace.Name)
- }
- m.tablespaces[tablespace.Name] = tablespace
- return nil
-}
-
+// mockTablespaceStorageManager is a storage manager where storage exists by
+// default unless explicitly marked as unavailable
type mockTablespaceStorageManager struct {
unavailableStorageLocations []string
}
@@ -79,156 +54,322 @@ func (mst mockTablespaceStorageManager) getStorageLocation(tablespaceName string
return fmt.Sprintf("/%s", tablespaceName)
}
-var _ = Describe("Tablespace synchronizer tests", func() {
+type fakeInstance struct {
+ *postgres.Instance
+ db *sql.DB
+}
+
+func (f fakeInstance) GetSuperUserDB() (*sql.DB, error) {
+ return f.db, nil
+}
+
+func (f fakeInstance) CanCheckReadiness() bool {
+ return true
+}
+
+func (f fakeInstance) IsPrimary() (bool, error) {
+ return true, nil
+}
+
+const (
+ expectedListStmt = `
+ SELECT
+ pg_tablespace.spcname spcname,
+ COALESCE(pg_roles.rolname, '') rolname
+ FROM pg_tablespace
+ LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid
+ WHERE spcname NOT LIKE $1
+ `
+ expectedCreateStmt = "CREATE TABLESPACE \"%s\" OWNER \"%s\" " +
+ "LOCATION '%s'"
+
+ expectedUpdateStmt = "ALTER TABLESPACE \"%s\" OWNER TO \"%s\""
+
+ expectedReadinessCheck = `
+ SELECT
+ NOT pg_is_in_recovery()
+ OR (SELECT coalesce(setting, '') = '' FROM pg_settings WHERE name = 'primary_conninfo')
+ OR pg_last_wal_replay_lsn() IS NOT NULL
+ `
+)
+
+func getCluster(ctx context.Context, c client.Client, cluster *apiv1.Cluster) (*apiv1.Cluster, error) {
+ var updatedCluster apiv1.Cluster
+ err := c.Get(ctx, client.ObjectKey{
+ Namespace: cluster.Namespace,
+ Name: cluster.Name,
+ }, &updatedCluster)
+ return &updatedCluster, err
+}
+
+// tablespaceTest represents all the variable bits that go into a test of the
+// tablespace reconciler
+type tablespaceTest struct {
+ tablespacesInSpec []apiv1.TablespaceConfiguration
+ postgresExpectations func(sqlmock.Sqlmock)
+ shouldRequeue bool
+ storageManager tablespaceStorageManager
+ expectedTablespaceStatus []apiv1.TablespaceState
+}
+
+// assertTablespaceReconciled is the full test, going from setting up the mocks
+// and the cluster to verifying all expectations are met
+func assertTablespaceReconciled(ctx context.Context, tt tablespaceTest) {
+ db, dbMock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), sqlmock.MonitorPingsOption(true))
+ Expect(err).ToNot(HaveOccurred())
+
+ DeferCleanup(func() {
+ Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+ })
+
+ cluster := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster-example",
+ Namespace: "default",
+ },
+ }
+ cluster.Spec.Tablespaces = tt.tablespacesInSpec
+
+ fakeClient := fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithObjects(cluster).
+ WithStatusSubresource(&apiv1.Cluster{}).
+ Build()
+
+ pgInstance := postgres.NewInstance().
+ WithNamespace("default").
+ WithClusterName("cluster-example")
+
+ instance := fakeInstance{
+ Instance: pgInstance,
+ db: db,
+ }
+
tablespaceReconciler := TablespaceReconciler{
- instance: postgres.NewInstance().WithNamespace("myPod"),
+ instance: &instance,
+ client: fakeClient,
+ storageManager: tt.storageManager,
+ }
+
+ // these bits happen because the reconciler checks for instance readiness
+ dbMock.ExpectPing()
+ expectedReadiness := sqlmock.NewRows([]string{""}).AddRow("t")
+ dbMock.ExpectQuery(expectedReadinessCheck).WillReturnRows(expectedReadiness)
+
+ tt.postgresExpectations(dbMock)
+
+ results, err := tablespaceReconciler.Reconcile(ctx, reconcile.Request{})
+ Expect(err).ShouldNot(HaveOccurred())
+ if tt.shouldRequeue {
+ Expect(results).NotTo(BeZero())
+ } else {
+ Expect(results).To(BeZero())
}
+ updatedCluster, err := getCluster(ctx, fakeClient, cluster)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(updatedCluster.Status.TablespacesStatus).To(Equal(tt.expectedTablespaceStatus))
+}
+
+var _ = Describe("Tablespace synchronizer tests", func() {
When("tablespace configurations are realizable", func() {
It("will do nothing if the DB contains the tablespaces in spec", func(ctx context.Context) {
- tablespacesSpec := []apiv1.TablespaceConfiguration{
- {
- Name: "foo",
- Storage: apiv1.StorageConfiguration{
- Size: "1Gi",
- },
- Owner: apiv1.DatabaseRoleRef{
- Name: "app",
+ assertTablespaceReconciled(ctx, tablespaceTest{
+ tablespacesInSpec: []apiv1.TablespaceConfiguration{
+ {
+ Name: "foo",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ Owner: apiv1.DatabaseRoleRef{
+ Name: "app",
+ },
},
},
- }
- tbsManager := mockTablespaceManager{
- tablespaces: map[string]infrastructure.Tablespace{
- "foo": {
+ postgresExpectations: func(mock sqlmock.Sqlmock) {
+ // we expect the reconciler to list the tablespaces on the DB
+ rows := sqlmock.NewRows(
+ []string{"spcname", "rolname"}).
+ AddRow("foo", "app")
+ mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows)
+ },
+ shouldRequeue: false,
+ expectedTablespaceStatus: []apiv1.TablespaceState{
+ {
Name: "foo",
Owner: "app",
+ State: "reconciled",
},
},
- }
- tbsInDatabase, err := tbsManager.List(ctx)
- Expect(err).ShouldNot(HaveOccurred())
- tbsSteps := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec)
- result := tablespaceReconciler.applySteps(ctx, &tbsManager,
- mockTablespaceStorageManager{}, tbsSteps)
- Expect(result).To(ConsistOf(apiv1.TablespaceState{
- Name: "foo",
- Owner: "app",
- State: apiv1.TablespaceStatusReconciled,
- Error: "",
- }))
- Expect(tbsManager.callHistory).To(HaveLen(1))
- Expect(tbsManager.callHistory).To(ConsistOf("list"))
+ })
})
It("will change the owner when needed", func(ctx context.Context) {
- tablespacesSpec := []apiv1.TablespaceConfiguration{
- {
- Name: "foo",
- Storage: apiv1.StorageConfiguration{
- Size: "1Gi",
- },
- Owner: apiv1.DatabaseRoleRef{
- Name: "new_user",
+ assertTablespaceReconciled(ctx, tablespaceTest{
+ tablespacesInSpec: []apiv1.TablespaceConfiguration{
+ {
+ Name: "foo",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ Owner: apiv1.DatabaseRoleRef{
+ Name: "new_user",
+ },
},
},
- }
- tbsManager := mockTablespaceManager{
- tablespaces: map[string]infrastructure.Tablespace{
- "foo": {
+ postgresExpectations: func(mock sqlmock.Sqlmock) {
+ rows := sqlmock.NewRows(
+ []string{"spcname", "rolname"}).
+ AddRow("foo", "app")
+ mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows)
+ stmt := fmt.Sprintf(expectedUpdateStmt, "foo", "new_user")
+ mock.ExpectExec(stmt).
+ WillReturnResult(sqlmock.NewResult(2, 1))
+ },
+ shouldRequeue: false,
+ expectedTablespaceStatus: []apiv1.TablespaceState{
+ {
Name: "foo",
- Owner: "app",
+ Owner: "new_user",
+ State: "reconciled",
},
},
- }
- tbsInDatabase, err := tbsManager.List(ctx)
- Expect(err).ShouldNot(HaveOccurred())
- tbsByAction := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec)
- result := tablespaceReconciler.applySteps(ctx, &tbsManager,
- mockTablespaceStorageManager{}, tbsByAction)
- Expect(result).To(ConsistOf(
- apiv1.TablespaceState{
- Name: "foo",
- Owner: "new_user",
- State: apiv1.TablespaceStatusReconciled,
- Error: "",
- },
- ))
- Expect(tbsManager.callHistory).To(HaveLen(2))
- Expect(tbsManager.callHistory).To(ConsistOf("list", "update"))
+ })
})
- It("will create a tablespace in spec that is missing from DB", func(ctx context.Context) {
- tablespacesSpec := []apiv1.TablespaceConfiguration{
- {
- Name: "foo",
- Storage: apiv1.StorageConfiguration{
- Size: "1Gi",
+ It("will create a tablespace in spec that is missing from DB if mount point exists", func(ctx context.Context) {
+ assertTablespaceReconciled(ctx, tablespaceTest{
+ tablespacesInSpec: []apiv1.TablespaceConfiguration{
+ {
+ Name: "foo",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ },
+ {
+ Name: "bar",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ Owner: apiv1.DatabaseRoleRef{
+ Name: "new_user",
+ },
+ },
+ },
+ postgresExpectations: func(mock sqlmock.Sqlmock) {
+ // we expect the reconciler to list the tablespaces on DB, and to
+ // create a new tablespace
+ rows := sqlmock.NewRows(
+ []string{"spcname", "rolname"}).
+ AddRow("foo", "")
+ mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows)
+ stmt := fmt.Sprintf(expectedCreateStmt, "bar", "new_user", "/var/lib/postgresql/tablespaces/bar/data")
+ mock.ExpectExec(stmt).
+ WillReturnResult(sqlmock.NewResult(2, 1))
+ },
+ shouldRequeue: false,
+ storageManager: mockTablespaceStorageManager{
+ unavailableStorageLocations: []string{
+ "/foo",
},
},
- {
- Name: "bar",
- Storage: apiv1.StorageConfiguration{
- Size: "1Gi",
+ expectedTablespaceStatus: []apiv1.TablespaceState{
+ {
+ Name: "foo",
+ Owner: "",
+ State: "reconciled",
+ },
+ {
+ Name: "bar",
+ Owner: "new_user",
+ State: "reconciled",
},
},
- }
- tbsManager := mockTablespaceManager{
- tablespaces: map[string]infrastructure.Tablespace{
- "foo": {
+ })
+ })
+
+ It("will mark tablespace status as pending with error when the DB CREATE fails", func(ctx context.Context) {
+ assertTablespaceReconciled(ctx, tablespaceTest{
+ tablespacesInSpec: []apiv1.TablespaceConfiguration{
+ {
Name: "foo",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ },
+ {
+ Name: "bar",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ Owner: apiv1.DatabaseRoleRef{
+ Name: "new_user",
+ },
+ },
+ },
+ postgresExpectations: func(mock sqlmock.Sqlmock) {
+ // we expect the reconciler to list the tablespaces on DB, and to
+ // create a new tablespace
+ rows := sqlmock.NewRows(
+ []string{"spcname", "rolname"}).
+ AddRow("foo", "")
+ mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows)
+ // we simulate DB command failure
+ stmt := fmt.Sprintf(expectedCreateStmt, "bar", "new_user", "/var/lib/postgresql/tablespaces/bar/data")
+ mock.ExpectExec(stmt).
+ WillReturnError(errors.New("boom"))
+ },
+ shouldRequeue: true,
+ storageManager: mockTablespaceStorageManager{
+ unavailableStorageLocations: []string{
+ "/foo",
+ },
+ },
+ expectedTablespaceStatus: []apiv1.TablespaceState{
+ {
+ Name: "foo",
+ Owner: "",
+ State: "reconciled",
+ },
+ {
+ Name: "bar",
+ Owner: "new_user",
+ State: "pending",
+ Error: "while creating tablespace bar: boom",
},
},
- }
- tbsInDatabase, err := tbsManager.List(ctx)
- Expect(err).ShouldNot(HaveOccurred())
- tbsSteps := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec)
- result := tablespaceReconciler.applySteps(ctx, &tbsManager,
- mockTablespaceStorageManager{}, tbsSteps)
- Expect(result).To(ConsistOf(
- apiv1.TablespaceState{
- Name: "foo",
- Owner: "",
- State: apiv1.TablespaceStatusReconciled,
- },
- apiv1.TablespaceState{
- Name: "bar",
- Owner: "",
- State: apiv1.TablespaceStatusReconciled,
- },
- ))
- Expect(tbsManager.callHistory).To(HaveLen(2))
- Expect(tbsManager.callHistory).To(ConsistOf("list", "create"))
+ })
})
It("will requeue the tablespace creation if the mount path doesn't exist", func(ctx context.Context) {
- tablespacesSpec := []apiv1.TablespaceConfiguration{
- {
- Name: "foo",
- Storage: apiv1.StorageConfiguration{
- Size: "1Gi",
- },
- },
- }
- tbsManager := mockTablespaceManager{}
- tbsInDatabase, err := tbsManager.List(ctx)
- Expect(err).ShouldNot(HaveOccurred())
- tbsByAction := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec)
- result := tablespaceReconciler.applySteps(ctx, &tbsManager,
- mockTablespaceStorageManager{
+ assertTablespaceReconciled(ctx, tablespaceTest{
+ tablespacesInSpec: []apiv1.TablespaceConfiguration{
+ {
+ Name: "foo",
+ Storage: apiv1.StorageConfiguration{
+ Size: "1Gi",
+ },
+ },
+ },
+ postgresExpectations: func(mock sqlmock.Sqlmock) {
+ rows := sqlmock.NewRows(
+ []string{"spcname", "rolname"})
+ mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows)
+ },
+ shouldRequeue: true,
+ storageManager: mockTablespaceStorageManager{
unavailableStorageLocations: []string{
"/foo",
},
- }, tbsByAction)
- Expect(result).To(ConsistOf(
- apiv1.TablespaceState{
- Name: "foo",
- Owner: "",
- State: apiv1.TablespaceStatusPendingReconciliation,
- Error: "deferred until mount point is created",
- },
- ))
- Expect(tbsManager.callHistory).To(HaveLen(1))
- Expect(tbsManager.callHistory).To(ConsistOf("list"))
+ },
+ expectedTablespaceStatus: []apiv1.TablespaceState{
+ {
+ Name: "foo",
+ Owner: "",
+ State: "pending",
+ Error: "deferred until mount point is created",
+ },
+ },
+ })
})
})
})
diff --git a/internal/management/controller/tablespaces/infrastructure/contract.go b/internal/management/controller/tablespaces/infrastructure/contract.go
index 398e277849..d9a3bd16a0 100644
--- a/internal/management/controller/tablespaces/infrastructure/contract.go
+++ b/internal/management/controller/tablespaces/infrastructure/contract.go
@@ -16,8 +16,6 @@ limitations under the License.
package infrastructure
-import "context"
-
// Tablespace represents the tablespace information read from / written to the Database
type Tablespace struct {
// Name is the name of the tablespace
@@ -26,15 +24,3 @@ type Tablespace struct {
// Owner is the owner of this tablespace
Owner string `json:"owner"`
}
-
-// TablespaceManager abstracts the functionality of reconciling with PostgreSQL tablespaces
-type TablespaceManager interface {
- // List the tablespace in the database
- List(ctx context.Context) ([]Tablespace, error)
-
- // Create the tablespace in the database
- Create(ctx context.Context, tablespace Tablespace) error
-
- // Update the tablespace in the database (change ownership)
- Update(ctx context.Context, tablespace Tablespace) error
-}
diff --git a/internal/management/controller/tablespaces/infrastructure/postgres.go b/internal/management/controller/tablespaces/infrastructure/postgres.go
index 6b01c8184a..16f6eb0ae7 100644
--- a/internal/management/controller/tablespaces/infrastructure/postgres.go
+++ b/internal/management/controller/tablespaces/infrastructure/postgres.go
@@ -28,31 +28,14 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
)
-// postgresTablespaceManager is a TablespaceManager for a database instance
-type postgresTablespaceManager struct {
- superUserDB *sql.DB
-}
-
-// NewPostgresTablespaceManager returns an implementation of TablespaceManager for postgres
-func NewPostgresTablespaceManager(superDB *sql.DB) TablespaceManager {
- return newPostgresTablespaceManager(superDB)
-}
-
-// NewPostgresTablespaceManager returns an implementation of TablespaceManager for postgres
-func newPostgresTablespaceManager(superDB *sql.DB) postgresTablespaceManager {
- return postgresTablespaceManager{
- superUserDB: superDB,
- }
-}
-
// List the tablespaces in the database
// The content exclude pg_default and pg_global database
-func (tbsMgr postgresTablespaceManager) List(ctx context.Context) ([]Tablespace, error) {
+func List(ctx context.Context, db *sql.DB) ([]Tablespace, error) {
logger := log.FromContext(ctx).WithName("tbs_reconciler_list")
logger.Trace("Invoked list")
wrapErr := func(err error) error { return fmt.Errorf("while listing DB tablespaces: %w", err) }
- rows, err := tbsMgr.superUserDB.QueryContext(
+ rows, err := db.QueryContext(
ctx,
`
SELECT
@@ -93,7 +76,7 @@ func (tbsMgr postgresTablespaceManager) List(ctx context.Context) ([]Tablespace,
}
// Create the tablespace in the database, if tablespace is temporary tablespace, need reload configure
-func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespace) error {
+func Create(ctx context.Context, db *sql.DB, tbs Tablespace) error {
contextLog := log.FromContext(ctx).WithName("tbs_reconciler_create")
tablespaceLocation := specs.LocationForTablespace(tbs.Name)
@@ -104,7 +87,7 @@ func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespa
return fmt.Errorf("while creating tablespace %s: %w", tbs.Name, err)
}
var err error
- if _, err = tbsMgr.superUserDB.ExecContext(
+ if _, err = db.ExecContext(
ctx,
fmt.Sprintf(
"CREATE TABLESPACE %s OWNER %s LOCATION '%s'",
@@ -119,7 +102,7 @@ func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespa
}
// Update the tablespace in the database (change ownership)
-func (tbsMgr postgresTablespaceManager) Update(ctx context.Context, tbs Tablespace) error {
+func Update(ctx context.Context, db *sql.DB, tbs Tablespace) error {
contextLog := log.FromContext(ctx).WithName("tbs_reconciler_update")
tablespaceLocation := specs.LocationForTablespace(tbs.Name)
@@ -130,7 +113,7 @@ func (tbsMgr postgresTablespaceManager) Update(ctx context.Context, tbs Tablespa
return fmt.Errorf("while updating tablespace %s: %w", tbs.Name, err)
}
var err error
- if _, err = tbsMgr.superUserDB.ExecContext(
+ if _, err = db.ExecContext(
ctx,
fmt.Sprintf(
"ALTER TABLESPACE %s OWNER TO %s",
diff --git a/internal/management/controller/tablespaces/infrastructure/postgres_test.go b/internal/management/controller/tablespaces/infrastructure/postgres_test.go
index 51299e6c57..78b6e10f9a 100644
--- a/internal/management/controller/tablespaces/infrastructure/postgres_test.go
+++ b/internal/management/controller/tablespaces/infrastructure/postgres_test.go
@@ -36,30 +36,34 @@ var _ = Describe("Postgres tablespaces functions test", func() {
`
expectedCreateStmt := "CREATE TABLESPACE \"%s\" OWNER \"%s\" " +
"LOCATION '/var/lib/postgresql/tablespaces/atablespace/data'"
+
+ expectedUpdateStmt := "ALTER TABLESPACE \"%s\" OWNER TO \"%s\""
+
It("should send the expected query to list tablespaces and parse the return", func(ctx SpecContext) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
+ tbsName := "atablespace"
+ anotherTbsName := "anothertablespace"
+ ownerName := "postgres"
- tbsManager := newPostgresTablespaceManager(db)
rows := sqlmock.NewRows(
[]string{"spcname", "rolname"}).
- AddRow("atablespace", "postgres").
- AddRow("anothertablespace", "postgres")
+ AddRow(tbsName, ownerName).
+ AddRow(anotherTbsName, ownerName)
mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows)
- tbs, err := tbsManager.List(ctx)
+ tbs, err := List(ctx, db)
Expect(err).ShouldNot(HaveOccurred())
Expect(tbs).To(HaveLen(2))
Expect(tbs).To(ConsistOf(
- Tablespace{Name: "atablespace", Owner: "postgres"},
- Tablespace{Name: "anothertablespace", Owner: "postgres"}))
+ Tablespace{Name: tbsName, Owner: ownerName},
+ Tablespace{Name: anotherTbsName, Owner: ownerName}))
})
It("should detect error if the list query returns error", func(ctx SpecContext) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())
- tbsManager := newPostgresTablespaceManager(db)
mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnError(fmt.Errorf("boom"))
- tbs, err := tbsManager.List(ctx)
+ tbs, err := List(ctx, db)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("boom"))
Expect(tbs).To(BeEmpty())
@@ -71,10 +75,9 @@ var _ = Describe("Postgres tablespaces functions test", func() {
tbsName := "atablespace"
ownerName := "postgres"
stmt := fmt.Sprintf(expectedCreateStmt, tbsName, ownerName)
- tbsManager := newPostgresTablespaceManager(db)
mock.ExpectExec(stmt).
WillReturnResult(sqlmock.NewResult(2, 1))
- err = tbsManager.Create(ctx, Tablespace{Name: tbsName, Owner: "postgres"})
+ err = Create(ctx, db, Tablespace{Name: tbsName, Owner: ownerName})
Expect(err).ShouldNot(HaveOccurred())
Expect(mock.ExpectationsWereMet()).To(Succeed())
})
@@ -84,12 +87,23 @@ var _ = Describe("Postgres tablespaces functions test", func() {
tbsName := "atablespace"
ownerName := "postgres"
stmt := fmt.Sprintf(expectedCreateStmt, tbsName, ownerName)
- tbsManager := newPostgresTablespaceManager(db)
mock.ExpectExec(stmt).
WillReturnError(fmt.Errorf("boom"))
- err = tbsManager.Create(ctx, Tablespace{Name: tbsName, Owner: "postgres"})
+ err = Create(ctx, db, Tablespace{Name: tbsName, Owner: ownerName})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("boom"))
Expect(mock.ExpectationsWereMet()).To(Succeed())
})
+ It("should issue the expected command to update a tablespace", func(ctx SpecContext) {
+ db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+ tbsName := "atablespace"
+ ownerName := "postgres"
+ stmt := fmt.Sprintf(expectedUpdateStmt, tbsName, ownerName)
+ mock.ExpectExec(stmt).
+ WillReturnResult(sqlmock.NewResult(2, 1))
+ err = Update(ctx, db, Tablespace{Name: tbsName, Owner: ownerName})
+ Expect(err).ShouldNot(HaveOccurred())
+ Expect(mock.ExpectationsWereMet()).To(Succeed())
+ })
})
diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go
index 1b793189dc..6ba7490a6f 100644
--- a/internal/management/controller/tablespaces/manager.go
+++ b/internal/management/controller/tablespaces/manager.go
@@ -18,6 +18,7 @@ package tablespaces
import (
"context"
+ "database/sql"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
@@ -27,18 +28,30 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
)
+// instanceInterface represents the behavior required for the reconciler for
+// instance operations
+type instanceInterface interface {
+ GetNamespaceName() string
+ GetClusterName() string
+ GetSuperUserDB() (*sql.DB, error)
+ IsPrimary() (bool, error)
+ CanCheckReadiness() bool
+}
+
// TablespaceReconciler is a Kubernetes controller that ensures Tablespaces
// are created in Postgres
type TablespaceReconciler struct {
- instance *postgres.Instance
- client client.Client
+ instance instanceInterface
+ storageManager tablespaceStorageManager
+ client client.Client
}
// NewTablespaceReconciler creates a new TablespaceReconciler
func NewTablespaceReconciler(instance *postgres.Instance, client client.Client) *TablespaceReconciler {
controller := &TablespaceReconciler{
- instance: instance,
- client: client,
+ instance: instance,
+ client: client,
+ storageManager: instanceTablespaceStorageManager{},
}
return controller
}
@@ -54,7 +67,7 @@ func (r *TablespaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
// GetCluster gets the managed cluster through the client
func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
var cluster apiv1.Cluster
- err := r.GetClient().Get(ctx,
+ err := r.client.Get(ctx,
types.NamespacedName{
Namespace: r.instance.GetNamespaceName(),
Name: r.instance.GetClusterName(),
@@ -66,13 +79,3 @@ func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster,
return &cluster, nil
}
-
-// GetClient returns the dynamic client that is being used for a certain reconciler
-func (r *TablespaceReconciler) GetClient() client.Client {
- return r.client
-}
-
-// Instance returns the PostgreSQL instance that this reconciler is working on
-func (r *TablespaceReconciler) Instance() *postgres.Instance {
- return r.instance
-}
diff --git a/internal/management/controller/tablespaces/reconciler.go b/internal/management/controller/tablespaces/reconciler.go
index 2c1e70d79f..031ea0bfeb 100644
--- a/internal/management/controller/tablespaces/reconciler.go
+++ b/internal/management/controller/tablespaces/reconciler.go
@@ -18,6 +18,7 @@ package tablespaces
import (
"context"
+ "database/sql"
"fmt"
"time"
@@ -96,9 +97,7 @@ func (r *TablespaceReconciler) reconcile(
return nil, fmt.Errorf("while reconcile tablespaces: %w", err)
}
- tbsManager := infrastructure.NewPostgresTablespaceManager(superUserDB)
- tbsStorageManager := instanceTablespaceStorageManager{}
- tbsInDatabase, err := tbsManager.List(ctx)
+ tbsInDatabase, err := infrastructure.List(ctx, superUserDB)
if err != nil {
return nil, fmt.Errorf("could not fetch tablespaces from database: %w", err)
}
@@ -106,15 +105,14 @@ func (r *TablespaceReconciler) reconcile(
steps := evaluateNextSteps(ctx, tbsInDatabase, cluster.Spec.Tablespaces)
result := r.applySteps(
ctx,
- tbsManager,
- tbsStorageManager,
+ superUserDB,
steps,
)
// update the cluster status
updatedCluster := cluster.DeepCopy()
updatedCluster.Status.TablespacesStatus = result
- if err := r.GetClient().Status().Patch(ctx, updatedCluster, client.MergeFrom(cluster)); err != nil {
+ if err := r.client.Status().Patch(ctx, updatedCluster, client.MergeFrom(cluster)); err != nil {
return nil, fmt.Errorf("while setting the tablespace reconciler status: %w", err)
}
@@ -132,14 +130,13 @@ func (r *TablespaceReconciler) reconcile(
// if they arose when applying the steps
func (r *TablespaceReconciler) applySteps(
ctx context.Context,
- tbsManager infrastructure.TablespaceManager,
- tbsStorageManager tablespaceStorageManager,
+ db *sql.DB,
actions []tablespaceReconcilerStep,
) []apiv1.TablespaceState {
result := make([]apiv1.TablespaceState, len(actions))
for idx, step := range actions {
- result[idx] = step.execute(ctx, tbsManager, tbsStorageManager)
+ result[idx] = step.execute(ctx, db, r.storageManager)
}
return result
diff --git a/internal/management/controller/tablespaces/storage.go b/internal/management/controller/tablespaces/storage.go
index c9984305aa..d8ca019dff 100644
--- a/internal/management/controller/tablespaces/storage.go
+++ b/internal/management/controller/tablespaces/storage.go
@@ -22,6 +22,8 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
)
+// tablespaceStorageManager represents the required behavior in terms of storage
+// for the tablespace reconciler
type tablespaceStorageManager interface {
getStorageLocation(tbsName string) string
storageExists(tbsName string) (bool, error)
diff --git a/pkg/management/postgres/readiness/readiness.go b/pkg/management/postgres/readiness/readiness.go
index fc2d0f748b..ed1edb90dc 100644
--- a/pkg/management/postgres/readiness/readiness.go
+++ b/pkg/management/postgres/readiness/readiness.go
@@ -18,23 +18,28 @@ package readiness
import (
"context"
+ "database/sql"
"errors"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
)
// ErrStreamingReplicaNotConnected is raised for streaming replicas that never connected to its primary
var ErrStreamingReplicaNotConnected = errors.New("streaming replica was never connected to the primary node")
+// instanceInterface represents the required behavior for use in the readiness probe
+type instanceInterface interface {
+ CanCheckReadiness() bool
+ GetSuperUserDB() (*sql.DB, error)
+}
+
// Data is the readiness checker structure
type Data struct {
- instance *postgres.Instance
+ instance instanceInterface
streamingReplicaValidated bool
}
// ForInstance creates a readiness checker for a certain instance
-func ForInstance(instance *postgres.Instance) *Data {
+func ForInstance(instance instanceInterface) *Data {
return &Data{
instance: instance,
}
From c556f597345d386cf5cb8e6a0f4a68b162e6f4ad Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Mon, 18 Nov 2024 15:44:28 +0100
Subject: [PATCH 152/836] chore(perf): Use controller-runtime pprof server
(#6000)
The controller-runtime manager provides a pprof server that we can simply
enable. This change uses that capability and removes the pprof server we
used to have.
Closes #5999
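A hedged sketch of the capability this change relies on (newManager is a
hypothetical helper; the real operator wires many more options):

    package sketch

    import (
        ctrl "sigs.k8s.io/controller-runtime"
    )

    // pprofAddress returns the bind address for controller-runtime's
    // built-in pprof server, or "" to keep it disabled.
    func pprofAddress(enabled bool) string {
        if enabled {
            return "0.0.0.0:6060"
        }
        return ""
    }

    func newManager(enablePprof bool) (ctrl.Manager, error) {
        // An empty PprofBindAddress leaves the pprof endpoints off; a
        // non-empty one serves /debug/pprof/ on that address.
        return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
            PprofBindAddress: pprofAddress(enablePprof),
        })
    }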
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
config/manager/env_override.yaml | 4 ++
internal/cmd/manager/controller/controller.go | 46 ++-----------------
2 files changed, 9 insertions(+), 41 deletions(-)
diff --git a/config/manager/env_override.yaml b/config/manager/env_override.yaml
index 867e3b7f83..633bff2e15 100644
--- a/config/manager/env_override.yaml
+++ b/config/manager/env_override.yaml
@@ -20,3 +20,7 @@ spec:
- --webhook-port=9443
- --log-level=debug
- --pprof-server=true
+ ports:
+ - containerPort: 6060
+ name: pprof
+ protocol: TCP
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index b61f967844..fe8028545d 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -19,10 +19,8 @@ package controller
import (
"context"
- "errors"
"fmt"
"net/http"
- "net/http/pprof"
"time"
"github.com/cloudnative-pg/machinery/pkg/log"
@@ -41,7 +39,6 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/controller"
schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
"github.com/cloudnative-pg/cloudnative-pg/pkg/multicache"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
@@ -101,15 +98,10 @@ func RunController(
conf *configuration.Data,
) error {
ctx := context.Background()
-
setupLog.Info("Starting CloudNativePG Operator",
"version", versions.Version,
"build", versions.Info)
- if pprofDebug {
- startPprofDebugServer(ctx)
- }
-
managerOptions := ctrl.Options{
Scheme: scheme,
Metrics: server.Options{
@@ -123,6 +115,7 @@ func RunController(
Port: port,
CertDir: defaultWebhookCertDir,
}),
+ PprofBindAddress: getPprofServerAddress(pprofDebug),
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -464,39 +457,10 @@ func readSecret(
return data, nil
}
-// startPprofDebugServer exposes pprof debug server if the pprof-server env variable is set to true
-func startPprofDebugServer(ctx context.Context) {
- mux := http.NewServeMux()
- mux.HandleFunc("/debug/pprof/", pprof.Index)
- mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
- mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
- mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
- mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
-
- pprofServer := http.Server{
- Addr: "0.0.0.0:6060",
- Handler: mux,
- ReadTimeout: webserver.DefaultReadTimeout,
- ReadHeaderTimeout: webserver.DefaultReadHeaderTimeout,
+func getPprofServerAddress(enabled bool) string {
+ if enabled {
+ return "0.0.0.0:6060"
}
- setupLog.Info("Starting pprof HTTP server", "addr", pprofServer.Addr)
-
- go func() {
- go func() {
- <-ctx.Done()
-
- setupLog.Info("shutting down pprof HTTP server")
- ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancelFunc()
-
- if err := pprofServer.Shutdown(ctx); err != nil {
- setupLog.Error(err, "Failed to shutdown pprof HTTP server")
- }
- }()
-
- if err := pprofServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
- setupLog.Error(err, "Failed to start pprof HTTP server")
- }
- }()
+ return ""
}
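When `PprofBindAddress` is set, controller-runtime serves the standard `net/http/pprof` endpoints on that address, so a profile can be collected once port 6060 is reachable (for instance after a `kubectl port-forward`). A hedged sketch of fetching the heap profile; the forwarded address is an assumption:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumes port 6060 of the operator pod has been forwarded to localhost.
	resp, err := http.Get("http://localhost:6060/debug/pprof/heap?debug=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, len(body), "bytes of heap profile")
}
```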
From 8714eaa8070d135f203f9fb0676d246deafc2721 Mon Sep 17 00:00:00 2001
From: Sander Rodenhuis <53382213+srodenhuis@users.noreply.github.com>
Date: Wed, 20 Nov 2024 14:03:25 +0100
Subject: [PATCH 153/836] docs: add Akamai Technologies as an adopter (#6140)
Add Akamai Technologies as an adopter. Akamai uses CloudNativePG for
all managed databases in the Akamai App Platform (for LKE). See:
https://github.com/linode/apl-core
Signed-off-by: Sander Rodenhuis
---
ADOPTERS.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/ADOPTERS.md b/ADOPTERS.md
index c2038c9480..03a9c9841d 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -56,3 +56,4 @@ This list is sorted in chronological order, based on the submission date.
| [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. |
| [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. |
| [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. |
+| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App Platform](https://github.com/linode/apl-core) for all platform-managed PostgreSQL databases. |
From 2b0dc71181362b1a81a5b6e08c789fd00bec7f8f Mon Sep 17 00:00:00 2001
From: Casper Nielsen
Date: Wed, 20 Nov 2024 14:06:44 +0100
Subject: [PATCH 154/836] docs: add Novo Nordisk as a CloudNativePG adopter
(#6142)
We've been running CloudNativePG clusters as described for the last 9-10
months and are extremely happy with the operator. Glad to show our
support this way!
Signed-off-by: Casper Nielsen
---
ADOPTERS.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 03a9c9841d..7ce2afc859 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -57,3 +57,4 @@ This list is sorted in chronological order, based on the submission date.
| [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. |
| [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. |
| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App Platform](https://github.com/linode/apl-core) for all platform-managed PostgreSQL databases. |
+| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-24 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |
From 72e2f5c490fd17346f1f9ff539589a8d9f01690c Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Thu, 21 Nov 2024 11:01:44 +0100
Subject: [PATCH 155/836] feat(plugin): allow "plugin" backup method in kubectl
cnpg backup (#6045)
This patch adds the following features to the `kubectl cnpg backup`
subcommand:
* add the `plugin` backup method.
* add a `plugin-name` option that allows the user to specify the plugin
that should manage the backup.
* add a new `plugin-parameters` option that allows the user to specify
a set of parameters to be passed to the plugin while taking a backup
of a cluster.
Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Signed-off-by: wolfox
Co-authored-by: Armando Ruocco
Co-authored-by: wolfox
---
internal/cmd/plugin/backup/cmd.go | 55 ++++++++++++++++---
internal/cmd/plugin/backup/parameters.go | 53 ++++++++++++++++++
internal/cmd/plugin/backup/parameters_test.go | 53 ++++++++++++++++++
internal/cmd/plugin/backup/suite_test.go | 30 ++++++++++
4 files changed, 183 insertions(+), 8 deletions(-)
create mode 100644 internal/cmd/plugin/backup/parameters.go
create mode 100644 internal/cmd/plugin/backup/parameters_test.go
create mode 100644 internal/cmd/plugin/backup/suite_test.go
diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go
index 6c29147f7f..17aceabe31 100644
--- a/internal/cmd/plugin/backup/cmd.go
+++ b/internal/cmd/plugin/backup/cmd.go
@@ -21,6 +21,7 @@ import (
"fmt"
"slices"
"strconv"
+ "strings"
"time"
pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time"
@@ -44,6 +45,8 @@ type backupCommandOptions struct {
online *bool
immediateCheckpoint *bool
waitForArchive *bool
+ pluginName string
+ pluginParameters pluginParameters
}
func (options backupCommandOptions) getOnlineConfiguration() *apiv1.OnlineConfiguration {
@@ -59,7 +62,14 @@ func (options backupCommandOptions) getOnlineConfiguration() *apiv1.OnlineConfig
// NewCmd creates the new "backup" subcommand
func NewCmd() *cobra.Command {
- var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive string
+ var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive, pluginName string
+ var pluginParameters pluginParameters
+
+ backupMethods := []string{
+ string(apiv1.BackupMethodBarmanObjectStore),
+ string(apiv1.BackupMethodVolumeSnapshot),
+ string(apiv1.BackupMethodPlugin),
+ }
backupSubcommand := &cobra.Command{
Use: "backup [cluster]",
@@ -91,15 +101,24 @@ func NewCmd() *cobra.Command {
}
// Check if the backup method is correct
- allowedBackupMethods := []string{
- "",
- string(apiv1.BackupMethodBarmanObjectStore),
- string(apiv1.BackupMethodVolumeSnapshot),
- }
+ allowedBackupMethods := backupMethods
+ allowedBackupMethods = append(allowedBackupMethods, "")
if !slices.Contains(allowedBackupMethods, backupMethod) {
return fmt.Errorf("backup-method: %s is not supported by the backup command", backupMethod)
}
+ if backupMethod != string(apiv1.BackupMethodPlugin) {
+ if len(pluginName) > 0 {
+ return fmt.Errorf("plugin-name is allowed only when the backup method is %s",
+ apiv1.BackupMethodPlugin)
+ }
+
+ if len(pluginParameters) > 0 {
+ return fmt.Errorf("plugin-parameters is allowed only when the backup method is %s",
+ apiv1.BackupMethodPlugin)
+ }
+ }
+
var cluster apiv1.Cluster
// check if the cluster exists
err := plugin.Client.Get(
@@ -137,6 +156,8 @@ func NewCmd() *cobra.Command {
online: parsedOnline,
immediateCheckpoint: parsedImmediateCheckpoint,
waitForArchive: parsedWaitForArchive,
+ pluginName: pluginName,
+ pluginParameters: pluginParameters,
})
},
}
@@ -161,8 +182,8 @@ func NewCmd() *cobra.Command {
"method",
"m",
"",
- "If present, will override the backup method defined in backup resource, "+
- "valid values are volumeSnapshot and barmanObjectStore.",
+ fmt.Sprintf("If present, will override the backup method defined in backup resource, "+
+ "valid values are: %s.", strings.Join(backupMethods, ", ")),
)
const optionalAcceptedValues = "Optional. Accepted values: true|false|\"\"."
@@ -188,6 +209,17 @@ func NewCmd() *cobra.Command {
optionalAcceptedValues,
)
+ backupSubcommand.Flags().StringVar(&pluginName, "plugin-name", "",
+ "The name of the plugin that should take the backup. This option "+
+ "is allowed only when the backup method is set to 'plugin'",
+ )
+
+ backupSubcommand.Flags().VarP(&pluginParameters, "plugin-parameters", "",
"The set of plugin parameters that should be passed to the backup plugin, "+
"i.e. param-one=value,param-two=value. This option "+
"is allowed only when the backup method is set to 'plugin'",
+ )
+
return backupSubcommand
}
@@ -210,6 +242,13 @@ func createBackup(ctx context.Context, options backupCommandOptions) error {
}
utils.LabelClusterName(&backup.ObjectMeta, options.clusterName)
+ if len(options.pluginName) > 0 {
+ backup.Spec.PluginConfiguration = &apiv1.BackupPluginConfiguration{
+ Name: options.pluginName,
+ Parameters: options.pluginParameters,
+ }
+ }
+
err := plugin.Client.Create(ctx, &backup)
if err == nil {
fmt.Printf("backup/%v created\n", backup.Name)
diff --git a/internal/cmd/plugin/backup/parameters.go b/internal/cmd/plugin/backup/parameters.go
new file mode 100644
index 0000000000..126031a0e3
--- /dev/null
+++ b/internal/cmd/plugin/backup/parameters.go
@@ -0,0 +1,53 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package backup
+
+import (
+ "strings"
+
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
+)
+
+// pluginParameters is a set of parameters to be passed
+// to the plugin when taking a backup
+type pluginParameters map[string]string
+
+// String implements the pflag.Value interface
+func (e pluginParameters) String() string {
+ return strings.Join(stringset.FromKeys(e).ToList(), ",")
+}
+
+// Type implements the pflag.Value interface
+func (e pluginParameters) Type() string {
+ return "map[string]string"
+}
+
+// Set implements the pflag.Value interface
+func (e *pluginParameters) Set(val string) error {
+ entries := strings.Split(val, ",")
+ result := make(map[string]string, len(entries))
+ for _, entry := range entries {
+ if len(entry) == 0 {
+ continue
+ }
+
+ before, after, _ := strings.Cut(entry, "=")
+ result[before] = after
+ }
+ *e = result
+ return nil
+}
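On the command line this enables invocations such as `kubectl cnpg backup cluster-example -m plugin --plugin-name my-plugin --plugin-parameters param-one=value,param-two=value` (the plugin name is hypothetical). A minimal sketch of the parsing behavior implemented by `Set` above:

```go
package backup

import "fmt"

func exampleParse() {
	var params pluginParameters
	_ = params.Set("a=b,c,d=") // Set never returns an error
	// Entries without "=" or with an empty value are mapped to "".
	fmt.Println(params) // map[a:b c: d:]
}
```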
diff --git a/internal/cmd/plugin/backup/parameters_test.go b/internal/cmd/plugin/backup/parameters_test.go
new file mode 100644
index 0000000000..4b4e44cc22
--- /dev/null
+++ b/internal/cmd/plugin/backup/parameters_test.go
@@ -0,0 +1,53 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package backup
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("plugin parameters parsing", func() {
+ DescribeTable(
+ "plugin parameters and values table",
+ func(value string, expectedParams pluginParameters) {
+ var params pluginParameters
+ Expect(params.Set(value)).ToNot(HaveOccurred())
+ Expect(params).To(HaveLen(len(expectedParams)))
+ for k, v := range expectedParams {
+ Expect(params).To(HaveKeyWithValue(k, v))
+ }
+ },
+ Entry("empty value", "", nil),
+ Entry("singleton", "a=b", map[string]string{
+ "a": "b",
+ }),
+ Entry("singleton without value", "a", map[string]string{
+ "a": "",
+ }),
+ Entry("set", "a=b,c=d", map[string]string{
+ "a": "b",
+ "c": "d",
+ }),
+ Entry("set with elements without value", "a=b,c,d=,e=f", map[string]string{
+ "a": "b",
+ "c": "",
+ "d": "",
+ "e": "f",
+ }),
+ )
+})
diff --git a/internal/cmd/plugin/backup/suite_test.go b/internal/cmd/plugin/backup/suite_test.go
new file mode 100644
index 0000000000..2dd5a10241
--- /dev/null
+++ b/internal/cmd/plugin/backup/suite_test.go
@@ -0,0 +1,30 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package backup
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestBackup(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "CNPG Backup subcommand tests")
+}
From 07008a8ca57518039c1a5ee0e84c8d76787fb849 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 22 Nov 2024 14:32:42 +0100
Subject: [PATCH 156/836] perf(wal-archive): remove typed client from cmd
execution (#6066)
Performance improvement: the `wal-archive` command in the instance
manager now goes through the local webserver, which already holds a
`controller-runtime` client, instead of creating a new one.
Closes #5366
Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
internal/cmd/manager/walarchive/cmd.go | 42 ++-------
pkg/management/postgres/webserver/local.go | 93 +++++++++++++++----
.../postgres/webserver/local_client.go | 63 +++++++++++++
pkg/management/url/url.go | 3 +
4 files changed, 151 insertions(+), 50 deletions(-)
create mode 100644 pkg/management/postgres/webserver/local_client.go
diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go
index 7ff6adf958..fef861040f 100644
--- a/internal/cmd/manager/walarchive/cmd.go
+++ b/internal/cmd/manager/walarchive/cmd.go
@@ -31,7 +31,6 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/spf13/cobra"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
@@ -39,9 +38,8 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management"
pgManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -70,49 +68,27 @@ func NewCmd() *cobra.Command {
return err
}
- typedClient, err := management.NewControllerRuntimeClient()
- if err != nil {
- contextLog.Error(err, "creating controller-runtine client")
- return err
- }
-
- cluster, err := cacheClient.GetCluster()
- if err != nil {
- return fmt.Errorf("failed to get cluster: %w", err)
+ cluster, errCluster := cacheClient.GetCluster()
+ if errCluster != nil {
+ return fmt.Errorf("failed to get cluster: %w", errCluster)
}
- err = run(ctx, podName, pgData, cluster, args)
- if err != nil {
+ if err := run(ctx, podName, pgData, cluster, args); err != nil {
if errors.Is(err, errSwitchoverInProgress) {
contextLog.Warning("Refusing to archive WALs until the switchover is not completed",
"err", err)
} else {
contextLog.Error(err, logErrorMessage)
}
-
- condition := metav1.Condition{
- Type: string(apiv1.ConditionContinuousArchiving),
- Status: metav1.ConditionFalse,
- Reason: string(apiv1.ConditionReasonContinuousArchivingFailing),
- Message: err.Error(),
- }
- if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil {
- contextLog.Error(errCond, "Error changing wal archiving condition (wal archiving failed)")
+ if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil {
+ contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint")
}
return err
}
- // Update the condition if needed.
- condition := metav1.Condition{
- Type: string(apiv1.ConditionContinuousArchiving),
- Status: metav1.ConditionTrue,
- Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess),
- Message: "Continuous archiving is working",
- }
- if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil {
- contextLog.Error(errCond, "Error changing wal archiving condition (wal archiving succeeded)")
+ if err := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, ""); err != nil {
+ contextLog.Error(err, "while invoking the set wal archive condition endpoint")
}
-
return nil
},
}
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index 7981576c9f..0d15f851ca 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -27,11 +27,13 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
apierrs "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -58,6 +60,7 @@ func NewLocalWebServer(
serveMux := http.NewServeMux()
serveMux.HandleFunc(url.PathCache, endpoints.serveCache)
serveMux.HandleFunc(url.PathPgBackup, endpoints.requestBackup)
+ serveMux.HandleFunc(url.PathWALArchiveStatusCondition, endpoints.setWALArchiveStatusCondition)
server := &http.Server{
Addr: fmt.Sprintf("localhost:%d", url.LocalPort),
@@ -80,15 +83,7 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req
var js []byte
switch requestedObject {
case cache.ClusterKey:
- var cluster apiv1.Cluster
- err := ws.typedClient.Get(
- r.Context(),
- client.ObjectKey{
- Name: ws.instance.GetClusterName(),
- Namespace: ws.instance.GetNamespaceName(),
- },
- &cluster,
- )
+ cluster, err := ws.getCluster(r.Context())
if apierrs.IsNotFound(err) {
w.WriteHeader(http.StatusNotFound)
return
@@ -98,7 +93,7 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req
return
}
- js, err = json.Marshal(&cluster)
+ js, err = json.Marshal(cluster)
if err != nil {
log.Error(err, "while marshalling the cluster")
w.WriteHeader(http.StatusInternalServerError)
@@ -133,7 +128,6 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req
// This function schedule a backup
func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.Request) {
- var cluster apiv1.Cluster
var backup apiv1.Backup
ctx := context.Background()
@@ -144,10 +138,8 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
return
}
- if err := ws.typedClient.Get(ctx, client.ObjectKey{
- Namespace: ws.instance.GetNamespaceName(),
- Name: ws.instance.GetClusterName(),
- }, &cluster); err != nil {
+ cluster, err := ws.getCluster(ctx)
+ if err != nil {
http.Error(
w,
fmt.Sprintf("error while getting cluster: %v", err.Error()),
@@ -173,7 +165,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
return
}
- if err := ws.startBarmanBackup(ctx, &cluster, &backup); err != nil {
+ if err := ws.startBarmanBackup(ctx, cluster, &backup); err != nil {
http.Error(
w,
fmt.Sprintf("error while requesting backup: %v", err.Error()),
@@ -188,7 +180,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
return
}
- ws.startPluginBackup(ctx, &cluster, &backup)
+ ws.startPluginBackup(ctx, cluster, &backup)
_, _ = fmt.Fprint(w, "OK")
default:
@@ -199,6 +191,17 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
}
}
+func (ws *localWebserverEndpoints) getCluster(ctx context.Context) (*apiv1.Cluster, error) {
+ var cluster apiv1.Cluster
+ if err := ws.typedClient.Get(ctx, client.ObjectKey{
+ Namespace: ws.instance.GetNamespaceName(),
+ Name: ws.instance.GetClusterName(),
+ }, &cluster); err != nil {
+ return nil, err
+ }
+ return &cluster, nil
+}
+
func (ws *localWebserverEndpoints) startBarmanBackup(
ctx context.Context,
cluster *apiv1.Cluster,
@@ -236,3 +239,59 @@ func (ws *localWebserverEndpoints) startPluginBackup(
ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute)
NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx)
}
+
+// ArchiveStatusRequest is the request body for the archive status endpoint
+type ArchiveStatusRequest struct {
+ Error string `json:"error,omitempty"`
+}
+
+func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() *metav1.Condition {
+ if asr.Error != "" {
+ return &metav1.Condition{
+ Type: string(apiv1.ConditionContinuousArchiving),
+ Status: metav1.ConditionFalse,
+ Reason: string(apiv1.ConditionReasonContinuousArchivingFailing),
+ Message: asr.Error,
+ }
+ }
+
+ return &metav1.Condition{
+ Type: string(apiv1.ConditionContinuousArchiving),
+ Status: metav1.ConditionTrue,
+ Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess),
+ Message: "Continuous archiving is working",
+ }
+}
+
+func (ws *localWebserverEndpoints) setWALArchiveStatusCondition(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ contextLogger := log.FromContext(ctx)
+ // decode body req
+ var asr ArchiveStatusRequest
+ if err := json.NewDecoder(r.Body).Decode(&asr); err != nil {
+ contextLogger.Error(err, "error while decoding request")
+ http.Error(w, fmt.Sprintf("error while decoding request: %v", err.Error()), http.StatusBadRequest)
+ return
+ }
+
+ cluster, err := ws.getCluster(ctx)
+ if err != nil {
+ http.Error(
+ w,
+ fmt.Sprintf("error while getting cluster: %v", err.Error()),
+ http.StatusInternalServerError)
+ return
+ }
+
+ if errCond := conditions.Patch(ctx, ws.typedClient, cluster, asr.getContinuousArchivingCondition()); errCond != nil {
+ contextLogger.Error(errCond, "Error changing wal archiving condition",
+ "condition", asr.getContinuousArchivingCondition())
+ http.Error(
+ w,
+ fmt.Sprintf("error while updating wal archiving condition: %v", errCond.Error()),
+ http.StatusInternalServerError)
+ return
+ }
+
+ _, _ = fmt.Fprint(w, "OK")
+}
diff --git a/pkg/management/postgres/webserver/local_client.go b/pkg/management/postgres/webserver/local_client.go
new file mode 100644
index 0000000000..4f2ec068f5
--- /dev/null
+++ b/pkg/management/postgres/webserver/local_client.go
@@ -0,0 +1,63 @@
+package webserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "time"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
+)
+
+// LocalClient is an entity capable of interacting with the local webserver endpoints
+type LocalClient interface {
+ // SetWALArchiveStatusCondition sets the wal-archive status condition.
+ // An empty errMessage means that the archive process was successful.
+ // Returns any error encountered during the request.
+ SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error
+}
+
+type localClient struct {
+ cli *http.Client
+}
+
+// NewLocalClient returns a new instance of LocalClient
+func NewLocalClient() LocalClient {
+ const connectionTimeout = 2 * time.Second
+ const requestTimeout = 30 * time.Second
+
+ return &localClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)}
+}
+
+func (c *localClient) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error {
+ contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition)
+
+ asr := ArchiveStatusRequest{
+ Error: errMessage,
+ }
+
+ encoded, err := json.Marshal(&asr)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.cli.Post(
+ url.Local(url.PathWALArchiveStatusCondition, url.LocalPort),
+ "application/json",
+ bytes.NewBuffer(encoded),
+ )
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if errClose := resp.Body.Close(); errClose != nil {
+ contextLogger.Error(errClose, "while closing response body")
+ }
+ }()
+
+ return nil
+}
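A hedged usage sketch, mirroring how the wal-archive command above reports the outcome of an archive attempt through the local webserver:

```go
package walarchive

import (
	"context"

	"github.com/cloudnative-pg/machinery/pkg/log"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
)

func reportArchiveResult(ctx context.Context, archiveErr error) {
	contextLog := log.FromContext(ctx)

	// An empty message marks the archive attempt as successful.
	msg := ""
	if archiveErr != nil {
		msg = archiveErr.Error()
	}
	if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, msg); reqErr != nil {
		contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint")
	}
}
```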
diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go
index b72e89d9d1..31e3b4cead 100644
--- a/pkg/management/url/url.go
+++ b/pkg/management/url/url.go
@@ -43,6 +43,9 @@ const (
// PathPgStatus is the URL path for PostgreSQL Status
PathPgStatus string = "/pg/status"
+ // PathWALArchiveStatusCondition is the URL path for setting the wal-archive condition on the Cluster object
+ PathWALArchiveStatusCondition string = "/cluster/status/condition/wal/archive"
+
// PathPgBackup is the URL path for PostgreSQL Backup
PathPgBackup string = "/pg/backup"
From a03c9ffea127a2c96fd6b343e36fae54d60b8e33 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Fri, 22 Nov 2024 16:42:38 +0100
Subject: [PATCH 157/836] fix: avoid injecting plugin environment while
restoring a backup (#6144)
Signed-off-by: Leonardo Cecchi
---
go.mod | 2 +-
go.sum | 4 ++--
pkg/management/postgres/restore.go | 14 +++++++++++++-
3 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/go.mod b/go.mod
index 14918aad86..187b8fd06e 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0
- github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836
+ github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
diff --git a/go.sum b/go.sum
index d9cc553609..8c152fd74c 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5
github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
-github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836 h1:Hhg+I2QcaPNN5XaSsYb7Xw3PbQlvCA9eDY+SvVf902Q=
-github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns=
+github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2 h1:Je4vgmwTN9JIyWzQ4utFw3eQ3eP5sah/d7aS+U9bUhU=
+github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index 1e43cb6996..dc72d03ee7 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -38,6 +38,7 @@ import (
barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials"
barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer"
restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job"
+ "github.com/cloudnative-pg/machinery/pkg/envmap"
"github.com/cloudnative-pg/machinery/pkg/execlog"
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
@@ -267,7 +268,18 @@ func (info InitInfo) Restore(ctx context.Context) error {
if res == nil {
return errors.New("empty response from restoreViaPlugin, programmatic error")
}
- envs = res.Envs
+
+ processEnvironment, err := envmap.ParseEnviron()
+ if err != nil {
+ return fmt.Errorf("error while parsing the process environment: %w", err)
+ }
+
+ pluginEnvironment, err := envmap.Parse(res.Envs)
+ if err != nil {
+ return fmt.Errorf("error while parsing the plugin environment: %w", err)
+ }
+
+ envs = envmap.Merge(processEnvironment, pluginEnvironment).StringSlice()
config = res.RestoreConfig
} else {
// Before starting the restore we check if the archive destination is safe to use
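Based only on the calls introduced in this hunk, the merge can be pictured as follows; the exact `envmap` signatures and the conflict precedence (plugin variables winning) are assumptions, not verified here:

```go
package main

import (
	"fmt"

	"github.com/cloudnative-pg/machinery/pkg/envmap"
)

func main() {
	process, err := envmap.Parse([]string{"PGDATA=/var/lib/postgresql/data", "TZ=UTC"})
	if err != nil {
		panic(err)
	}
	plugin, err := envmap.Parse([]string{"AWS_REGION=eu-west-1"})
	if err != nil {
		panic(err)
	}

	// The restore process now runs with its own environment plus the
	// plugin-provided variables, instead of the plugin variables alone.
	fmt.Println(envmap.Merge(process, plugin).StringSlice())
}
```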
From 333d561ff5e4680337f2a07d879370cf9b4bf4c6 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Fri, 22 Nov 2024 17:07:16 +0100
Subject: [PATCH 158/836] docs: complete the database feature documentation
(#5647)
Closes #5587
Signed-off-by: Jaime Silvela
Signed-off-by: Gabriele Bartolini
Signed-off-by: Marco Nenciarini
Co-authored-by: Gabriele Bartolini
Co-authored-by: Marco Nenciarini
---
.wordlist-en-custom.txt | 1 +
api/v1/cluster_webhook.go | 2 +-
docs/mkdocs.yml | 1 +
docs/src/declarative_database_management.md | 205 +++++++++++++++---
docs/src/declarative_role_management.md | 2 +-
docs/src/index.md | 1 +
docs/src/operator_capability_levels.md | 12 +-
.../cmd/manager/instance/pgbasebackup/cmd.go | 2 +-
tests/e2e/asserts_test.go | 2 +-
9 files changed, 195 insertions(+), 33 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 51fefca5de..9bc6770ca5 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1128,6 +1128,7 @@ scheduledbackupspec
scheduledbackupstatus
schedulerName
schemaOnly
+schemas
sdk
searchAttribute
searchFilter
diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go
index f3f8ee9d63..0d6da62c95 100644
--- a/api/v1/cluster_webhook.go
+++ b/api/v1/cluster_webhook.go
@@ -102,7 +102,7 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) {
r.Spec.Bootstrap = &BootstrapConfiguration{}
}
- // Defaulting initDB if no other boostrap method was passed
+ // Defaulting initDB if no other bootstrap method was passed
switch {
case r.Spec.Bootstrap.Recovery != nil:
r.defaultRecovery()
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 471250cf7d..b9808c650d 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -39,6 +39,7 @@ nav:
- service_management.md
- postgresql_conf.md
- declarative_role_management.md
+ - declarative_database_management.md
- tablespaces.md
- operator_conf.md
- cluster_conf.md
diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md
index b740292660..52e9b2c76e 100644
--- a/docs/src/declarative_database_management.md
+++ b/docs/src/declarative_database_management.md
@@ -1,21 +1,38 @@
-# Declarative Database Management
+# PostgreSQL Database Management
-Declarative database management enables users to control the lifecycle of
-databases via a new Custom Resource Definition (CRD) called `Database`.
+CloudNativePG simplifies PostgreSQL database provisioning by automatically
+creating an application database named `app` by default. This default behavior
+is explained in the ["Bootstrap an Empty Cluster"](bootstrap.md#bootstrap-an-empty-cluster-initdb)
+section.
-A `Database` object is managed by the instance manager of the cluster's
-primary instance. This feature is not supported in replica clusters,
-as replica clusters lack a primary instance to manage the `Database` object.
+For more advanced use cases, CloudNativePG introduces **declarative database
+management**, which empowers users to define and control the lifecycle of
+PostgreSQL databases using the `Database` Custom Resource Definition (CRD).
+This method seamlessly integrates with Kubernetes, providing a scalable,
+automated, and consistent approach to managing PostgreSQL databases.
-### Example: Simple Database Declaration
+---
-Below is an example of a basic `Database` configuration:
+## Key Concepts
+
+### Scope of Management
+
+!!! Important
+ CloudNativePG manages **global objects** in PostgreSQL clusters, such as
+ databases, roles, and tablespaces. However, it does **not** manage the content
+ of databases (e.g., schemas and tables). For database content, specialized
+ tools or the applications themselves should be used.
+
+### Declarative `Database` Manifest
+
+The following example demonstrates how a `Database` resource interacts with a
+`Cluster`:
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
- name: db-one
+ name: cluster-example-one
spec:
name: one
owner: app
@@ -23,32 +40,91 @@ spec:
name: cluster-example
```
-Once the reconciliation cycle is completed successfully, the `Database`
-status will show a `applied` field set to `true` and an empty `message` field.
+When applied, this manifest creates a `Database` object called
+`cluster-example-one` requesting a database named `one`, owned by the `app`
+role, in the `cluster-example` PostgreSQL cluster.
+
+!!! Info
+ Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-DatabaseSpec)
+    for the full list of attributes you can define for each `Database` object.
+
+### Required Fields in the `Database` Manifest
+
+- `metadata.name`: Unique name of the Kubernetes object within its namespace.
+- `spec.name`: Name of the database as it will appear in PostgreSQL.
+- `spec.owner`: PostgreSQL role that owns the database.
+- `spec.cluster.name`: Name of the target PostgreSQL cluster.
+
+The `Database` object must reference a specific `Cluster`, determining where
+the database will be created. It is managed by the cluster's primary instance,
+ensuring the database is created or updated as needed.
-### Database Deletion and Reclaim Policies
+!!! Info
+ The distinction between `metadata.name` and `spec.name` allows multiple
+ `Database` resources to reference databases with the same name across different
+ CloudNativePG clusters in the same Kubernetes namespace.
-A finalizer named `cnpg.io/deleteDatabase` is automatically added
-to each `Database` object to control its deletion process.
+## Reserved Database Names
-By default, the `databaseReclaimPolicy` is set to `retain`, which means
-that if the `Database` object is deleted, the actual PostgreSQL database
-is retained for manual management by an administrator.
+PostgreSQL automatically creates databases such as `postgres`, `template0`, and
+`template1`. These names are reserved and cannot be used for new `Database`
+objects in CloudNativePG.
-Alternatively, if the `databaseReclaimPolicy` is set to `delete`,
-the PostgreSQL database will be automatically deleted when the `Database`
-object is removed.
+!!! Important
+ Creating a `Database` with `spec.name` set to `postgres`, `template0`, or
+ `template1` is not allowed.
-### Example: Database with Delete Reclaim Policy
+## Reconciliation and Status
-The following example illustrates a `Database` object with a `delete`
-reclaim policy:
+Once a `Database` object is reconciled successfully:
+
+- `status.applied` will be set to `true`.
+- `status.observedGeneration` will match the `metadata.generation` of the last
+ applied configuration.
+
+Example of a reconciled `Database` object:
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
- name: db-one-with-delete-reclaim-policy
+ generation: 1
+ name: cluster-example-one
+spec:
+ cluster:
+ name: cluster-example
+ name: one
+ owner: app
+status:
+ observedGeneration: 1
+ applied: true
+```
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
+
+## Deleting a Database
+
+CloudNativePG supports two methods for database deletion:
+
+1. Using the `delete` reclaim policy
+2. Declaratively setting the database's `ensure` field to `absent`
+
+### Deleting via `delete` Reclaim Policy
+
+The `databaseReclaimPolicy` field determines the behavior when a `Database`
+object is deleted:
+
+- `retain` (default): The database remains in PostgreSQL for manual management.
+- `delete`: The database is automatically removed from PostgreSQL.
+
+Example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+ name: cluster-example-two
spec:
databaseReclaimPolicy: delete
name: two
@@ -57,4 +133,83 @@ spec:
name: cluster-example
```
-In this case, when the `Database` object is deleted, the corresponding PostgreSQL database will also be removed automatically.
+Deleting this `Database` object will automatically remove the `two` database
+from the `cluster-example` cluster.
+
+### Declaratively Setting `ensure: absent`
+
+To remove a database, set the `ensure` field to `absent`, as in the following
+example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+ name: cluster-example-database-to-drop
+spec:
+ cluster:
+ name: cluster-example
+ name: database-to-drop
+ owner: app
+ ensure: absent
+```
+
+This manifest ensures that the `database-to-drop` database is removed from the
+`cluster-example` cluster.
+
+## Limitations and Caveats
+
+### Renaming a Database
+
+While CloudNativePG adheres to PostgreSQL’s
+[CREATE DATABASE](https://www.postgresql.org/docs/current/sql-createdatabase.html) and
+[ALTER DATABASE](https://www.postgresql.org/docs/current/sql-alterdatabase.html)
+commands, **renaming databases is not supported**.
+Attempting to modify `spec.name` in an existing `Database` object will result
+in rejection by Kubernetes.
+
+### Creating vs. Altering a Database
+
+- For new databases, CloudNativePG uses the `CREATE DATABASE` statement.
+- For existing databases, `ALTER DATABASE` is used to apply changes.
+
+It is important to note that there are some differences between these two
+Postgres commands: in particular, the options accepted by `ALTER` are a subset
+of those accepted by `CREATE`.
+
+!!! Warning
+ Some fields, such as encoding and collation settings, are immutable in
+ PostgreSQL. Attempts to modify these fields on existing databases will be
+ ignored.
+
+### Replica Clusters
+
+Database objects declared on replica clusters cannot be enforced, as replicas
+lack write privileges. These objects will remain in a pending state until the
+replica is promoted.
+
+### Conflict Resolution
+
+If two `Database` objects in the same namespace manage the same PostgreSQL
+database (i.e., identical `spec.name` and `spec.cluster.name`), the second
+object will be rejected.
+
+Example status message:
+
+```yaml
+status:
+ applied: false
+ message: 'reconciliation error: database "one" is already managed by Database object "cluster-example-one"'
+```
+
+### Postgres Version Differences
+
+CloudNativePG adheres to PostgreSQL's capabilities. For example, features like
+`ICU_RULES` introduced in PostgreSQL 16 are unavailable in earlier versions.
+Errors from PostgreSQL will be reflected in the `Database` object's `status`.
+
+### Manual Changes
+
+CloudNativePG does not overwrite manual changes to databases. Once reconciled,
+a `Database` object will not be reapplied unless its `metadata.generation`
+changes, giving flexibility for direct PostgreSQL modifications.
diff --git a/docs/src/declarative_role_management.md b/docs/src/declarative_role_management.md
index 04b328c977..2c0c109cbc 100644
--- a/docs/src/declarative_role_management.md
+++ b/docs/src/declarative_role_management.md
@@ -1,4 +1,4 @@
-# Database Role Management
+# PostgreSQL Role Management
From its inception, CloudNativePG has managed the creation of specific roles
required in PostgreSQL instances:
diff --git a/docs/src/index.md b/docs/src/index.md
index 641c074d2f..815dc0af85 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -87,6 +87,7 @@ Additionally, the community provides images for the [PostGIS extension](postgis.
Postgres extensions through the cluster `spec`: `pgaudit`, `auto_explain`,
`pg_stat_statements`, and `pg_failover_slots`
* Declarative management of Postgres roles, users and groups
+* Declarative management of Postgres databases
* Support for Local Persistent Volumes with PVC templates
* Reuse of Persistent Volumes storage in Pods
* Separate volumes for WAL files and tablespaces
diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md
index ce7f31e8c0..3ef8b0f5d8 100644
--- a/docs/src/operator_capability_levels.md
+++ b/docs/src/operator_capability_levels.md
@@ -136,14 +136,18 @@ PostgreSQL outside Kubernetes. This is particularly useful for DBaaS purposes.
### Database configuration
-The operator is designed to manage a PostgreSQL cluster with a single
-database. The operator transparently manages access to the database through
-three Kubernetes services provisioned and managed for read-write,
+The operator is designed to bootstrap a PostgreSQL cluster with a single
+database. The operator transparently manages network access to the cluster
+through three Kubernetes services provisioned and managed for read-write,
read, and read-only workloads.
Using the convention-over-configuration approach, the operator creates a
database called `app`, by default owned by a regular Postgres user with the
same name. You can specify both the database name and the user name, if
-required.
+required, as part of the bootstrap.
+
+Additional databases can be created or managed via
+[declarative database management](declarative_database_management.md) using
+the `Database` CRD.
Although no configuration is required to run the cluster, you can customize
both PostgreSQL runtime configuration and PostgreSQL host-based
diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go
index 3f9d9719e1..57a92cd83c 100644
--- a/internal/cmd/manager/instance/pgbasebackup/cmd.go
+++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go
@@ -77,7 +77,7 @@ func NewCmd() *cobra.Command {
}
if err = env.bootstrapUsingPgbasebackup(ctx); err != nil {
- contextLogger.Error(err, "Unable to boostrap cluster")
+ contextLogger.Error(err, "Unable to bootstrap cluster")
}
return err
},
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 685630691f..37c0c0276d 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -975,7 +975,7 @@ func AssertReplicaModeCluster(
// AssertDetachReplicaModeCluster verifies that a replica cluster can be detached from the
// source cluster, and its target primary can be promoted. As such, new write operation
// on the source cluster shouldn't be received anymore by the detached replica cluster.
-// Also, make sure the boostrap fields database and owner of the replica cluster are
+// Also, make sure the bootstrap fields database and owner of the replica cluster are
// properly ignored
func AssertDetachReplicaModeCluster(
namespace,
From fda635b550f3c05d81326b9ee1d9eb64f66b8954 Mon Sep 17 00:00:00 2001
From: Peggie
Date: Sat, 23 Nov 2024 19:37:14 +0100
Subject: [PATCH 159/836] feat: Public Cloud K8S versions update (#6129)
Update the versions used to test the operator on public cloud providers
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: public-cloud-k8s-versions-check
---
.github/eks_versions.json | 3 +--
.github/kind_versions.json | 8 ++++----
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/.github/eks_versions.json b/.github/eks_versions.json
index 537d9f2b64..3121122733 100644
--- a/.github/eks_versions.json
+++ b/.github/eks_versions.json
@@ -2,6 +2,5 @@
"1.31",
"1.30",
"1.29",
- "1.28",
- "1.27"
+ "1.28"
]
diff --git a/.github/kind_versions.json b/.github/kind_versions.json
index d9bd1a1215..85547c7125 100644
--- a/.github/kind_versions.json
+++ b/.github/kind_versions.json
@@ -1,7 +1,7 @@
[
- "v1.31.1",
- "v1.30.4",
- "v1.29.8",
- "v1.28.13",
+ "v1.31.2",
+ "v1.30.6",
+ "v1.29.10",
+ "v1.28.15",
"v1.27.16"
]
From 5e80915aff117dc8e92b2914ac3097b1c4fc090d Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Sat, 23 Nov 2024 20:08:13 +0100
Subject: [PATCH 160/836] feat: update default PostgreSQL version to 17.2
(#6018)
Update default PostgreSQL version from 17.0 to 17.2
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: postgres-versions-updater
---
.github/pg_versions.json | 24 +++++++++++-----------
docs/src/bootstrap.md | 10 ++++-----
docs/src/declarative_hibernation.md | 2 +-
docs/src/image_catalog.md | 4 ++--
docs/src/kubectl-plugin.md | 4 ++--
docs/src/monitoring.md | 2 +-
docs/src/postgis.md | 2 +-
docs/src/samples/cluster-example-full.yaml | 2 +-
docs/src/scheduling.md | 2 +-
docs/src/ssl_connections.md | 2 +-
docs/src/troubleshooting.md | 4 ++--
pkg/versions/versions.go | 2 +-
12 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/.github/pg_versions.json b/.github/pg_versions.json
index fa6d5ae4ed..119882aec3 100644
--- a/.github/pg_versions.json
+++ b/.github/pg_versions.json
@@ -1,26 +1,26 @@
{
"17": [
- "17.0",
- "17.0-27"
+ "17.2",
+ "17.1"
],
"16": [
- "16.4",
- "16.3"
+ "16.6",
+ "16.5"
],
"15": [
- "15.8",
- "15.7"
+ "15.10",
+ "15.9"
],
"14": [
- "14.13",
- "14.12"
+ "14.15",
+ "14.14"
],
"13": [
- "13.16",
- "13.15"
+ "13.18",
+ "13.17"
],
"12": [
- "12.20",
- "12.19"
+ "12.22",
+ "12.21"
]
}
\ No newline at end of file
diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md
index 87525b4679..6aff83a8a6 100644
--- a/docs/src/bootstrap.md
+++ b/docs/src/bootstrap.md
@@ -519,7 +519,7 @@ file on the source PostgreSQL instance:
host replication streaming_replica all md5
```
-The following manifest creates a new PostgreSQL 17.0 cluster,
+The following manifest creates a new PostgreSQL 17.2 cluster,
called `target-db`, using the `pg_basebackup` bootstrap method
to clone an external PostgreSQL cluster defined as `source-db`
(in the `externalClusters` array). As you can see, the `source-db`
@@ -534,7 +534,7 @@ metadata:
name: target-db
spec:
instances: 3
- imageName: ghcr.io/cloudnative-pg/postgresql:17.0
+ imageName: ghcr.io/cloudnative-pg/postgresql:17.2
bootstrap:
pg_basebackup:
@@ -554,7 +554,7 @@ spec:
```
All the requirements must be met for the clone operation to work, including
-the same PostgreSQL version (in our case 17.0).
+the same PostgreSQL version (in our case 17.2).
#### TLS certificate authentication
@@ -569,7 +569,7 @@ in the same Kubernetes cluster.
This example can be easily adapted to cover an instance that resides
outside the Kubernetes cluster.
-The manifest defines a new PostgreSQL 17.0 cluster called `cluster-clone-tls`,
+The manifest defines a new PostgreSQL 17.2 cluster called `cluster-clone-tls`,
which is bootstrapped using the `pg_basebackup` method from the `cluster-example`
external cluster. The host is identified by the read/write service
in the same cluster, while the `streaming_replica` user is authenticated
@@ -584,7 +584,7 @@ metadata:
name: cluster-clone-tls
spec:
instances: 3
- imageName: ghcr.io/cloudnative-pg/postgresql:17.0
+ imageName: ghcr.io/cloudnative-pg/postgresql:17.2
bootstrap:
pg_basebackup:
diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md
index 1b7a64f7af..4df6e3403d 100644
--- a/docs/src/declarative_hibernation.md
+++ b/docs/src/declarative_hibernation.md
@@ -58,7 +58,7 @@ $ kubectl cnpg status
Cluster Summary
Name: cluster-example
Namespace: default
-PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0
+PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.2
Primary instance: cluster-example-2
Status: Cluster in healthy state
Instances: 3
diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md
index a84890a480..6078124fa6 100644
--- a/docs/src/image_catalog.md
+++ b/docs/src/image_catalog.md
@@ -32,7 +32,7 @@ spec:
- major: 15
image: ghcr.io/cloudnative-pg/postgresql:15.6
- major: 16
- image: ghcr.io/cloudnative-pg/postgresql:17.0
+ image: ghcr.io/cloudnative-pg/postgresql:17.2
```
**Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:**
@@ -47,7 +47,7 @@ spec:
- major: 15
image: ghcr.io/cloudnative-pg/postgresql:15.6
- major: 16
- image: ghcr.io/cloudnative-pg/postgresql:17.0
+ image: ghcr.io/cloudnative-pg/postgresql:17.2
```
A `Cluster` resource has the flexibility to reference either an `ImageCatalog`
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index 793fd706ff..e066bbc082 100755
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -1006,7 +1006,7 @@ it from the actual pod. This means that you will be using the `postgres` user.
```console
$ kubectl cnpg psql cluster-example
-psql (17.0 (Debian 17.0-1.pgdg110+1))
+psql (17.2 (Debian 17.2-1.pgdg110+1))
Type "help" for help.
postgres=#
@@ -1018,7 +1018,7 @@ select to work against a replica by using the `--replica` option:
```console
$ kubectl cnpg psql --replica cluster-example
-psql (17.0 (Debian 17.0-1.pgdg110+1))
+psql (17.2 (Debian 17.2-1.pgdg110+1))
Type "help" for help.
diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md
index 7814949e06..3fa83cb2f6 100644
--- a/docs/src/monitoring.md
+++ b/docs/src/monitoring.md
@@ -217,7 +217,7 @@ cnpg_collector_up{cluster="cluster-example"} 1
# HELP cnpg_collector_postgres_version Postgres version
# TYPE cnpg_collector_postgres_version gauge
-cnpg_collector_postgres_version{cluster="cluster-example",full="17.0"} 17.0
+cnpg_collector_postgres_version{cluster="cluster-example",full="17.2"} 17.2
# HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp
# TYPE cnpg_collector_last_failed_backup_timestamp gauge
diff --git a/docs/src/postgis.md b/docs/src/postgis.md
index a31fb607c7..cd139ac5d7 100644
--- a/docs/src/postgis.md
+++ b/docs/src/postgis.md
@@ -100,7 +100,7 @@ values from the ones in this document):
```console
$ kubectl exec -ti postgis-example-1 -- psql app
Defaulted container "postgres" out of: postgres, bootstrap-controller (init)
-psql (17.0 (Debian 17.0-1.pgdg110+1))
+psql (17.2 (Debian 17.2-1.pgdg110+1))
Type "help" for help.
app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1;
diff --git a/docs/src/samples/cluster-example-full.yaml b/docs/src/samples/cluster-example-full.yaml
index f0fa1fe10e..321e94a2fe 100644
--- a/docs/src/samples/cluster-example-full.yaml
+++ b/docs/src/samples/cluster-example-full.yaml
@@ -35,7 +35,7 @@ metadata:
name: cluster-example-full
spec:
description: "Example of cluster"
- imageName: ghcr.io/cloudnative-pg/postgresql:17.0
+ imageName: ghcr.io/cloudnative-pg/postgresql:17.2
# imagePullSecret is only required if the images are located in a private registry
# imagePullSecrets:
# - name: private_registry_access
diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md
index 57eb71b69b..a681f412a6 100644
--- a/docs/src/scheduling.md
+++ b/docs/src/scheduling.md
@@ -40,7 +40,7 @@ metadata:
name: cluster-example
spec:
instances: 3
- imageName: ghcr.io/cloudnative-pg/postgresql:17.0
+ imageName: ghcr.io/cloudnative-pg/postgresql:17.2
affinity:
enablePodAntiAffinity: true # Default value
diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md
index b11ea1a620..3762ab95d8 100644
--- a/docs/src/ssl_connections.md
+++ b/docs/src/ssl_connections.md
@@ -173,7 +173,7 @@ Output:
version
--------------------------------------------------------------------------------------
------------------
-PostgreSQL 17.0 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
+PostgreSQL 17.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
8.3.1-5), 64-bit
(1 row)
```
diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md
index fc6f59c39f..aa67c8be17 100644
--- a/docs/src/troubleshooting.md
+++ b/docs/src/troubleshooting.md
@@ -220,7 +220,7 @@ Cluster in healthy state
Name: cluster-example
Namespace: default
System ID: 7044925089871458324
-PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0-3
+PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.2-3
Primary instance: cluster-example-1
Instances: 3
Ready instances: 3
@@ -288,7 +288,7 @@ kubectl describe cluster -n | grep "Image Name"
Output:
```shell
- Image Name: ghcr.io/cloudnative-pg/postgresql:17.0-3
+ Image Name: ghcr.io/cloudnative-pg/postgresql:17.2-3
```
!!! Note
diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go
index 9493ffe2a3..6189bdad1f 100644
--- a/pkg/versions/versions.go
+++ b/pkg/versions/versions.go
@@ -23,7 +23,7 @@ const (
Version = "1.24.1"
// DefaultImageName is the default image used by the operator to create pods
- DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.0"
+ DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2"
// DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL
DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1"
From c390c1dccce7ab8b5b817624f42dc3198db2128e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gr=C3=A9goire=20Bellon-Gervais?=
Date: Sun, 24 Nov 2024 10:01:38 +0100
Subject: [PATCH 161/836] docs: add Docaposte to ADOPTERS.md (#6145)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Docaposte is the digital trust leader in France. We use CloudNativePG because
it is the most elegant and efficient solution for running PostgreSQL in production.
Signed-off-by: Grégoire Bellon-Gervais
---
ADOPTERS.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 7ce2afc859..44eb525966 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -57,4 +57,5 @@ This list is sorted in chronological order, based on the submission date.
| [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. |
| [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. |
| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. |
-| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-24 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |
+| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |
+| [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. |
From 9eb3cbde1e62c489edb474a952f41a6bd9add8d2 Mon Sep 17 00:00:00 2001
From: Klavs Klavsen
Date: Mon, 25 Nov 2024 14:53:23 +0100
Subject: [PATCH 162/836] docs: add Obmondo to `ADOPTERS.md` (#6162)
At Obmondo we use CloudNativePG in our open-source Kubernetes
meta-management platform called KubeAid to easily manage all
PostgreSQL databases across clusters from a centralized interface.
Signed-off-by: Klavs Klavsen
---
ADOPTERS.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 44eb525966..4a4601f02f 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -59,3 +59,4 @@ This list is sorted in chronological order, based on the submission date.
| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. |
| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |
| [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. |
+| [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. |
From a3b0fdb67384215fa2bdd727cbc391e15481e957 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Tue, 26 Nov 2024 19:04:56 +0100
Subject: [PATCH 163/836] fix: ensure former primary WALs are flushed before
resyncing (#6141)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This patch fixes an issue where the former primary node may fail to
archive its WAL files before resynchronizing as a replica after a
switchover event. This gap in the WAL stream could prevent point-in-time
recovery (PITR) to positions between the current primary node’s
promotion and the next backup. The fix ensures the former primary
flushes and archives all pending WAL files before rejoining the cluster,
maintaining a complete and consistent WAL stream.
Closes #5959
Signed-off-by: Armando Ruocco
Signed-off-by: Leonardo Cecchi
Co-authored-by: Leonardo Cecchi
---
go.mod | 2 +-
go.sum | 4 +-
internal/cmd/manager/walarchive/cmd.go | 214 +-----------
.../controller/instance_controller.go | 2 +-
.../management/controller/instance_startup.go | 6 +
pkg/management/postgres/archiver/archiver.go | 325 ++++++++++++++++++
pkg/management/postgres/archiver/doc.go | 18 +
.../postgres/constants/constants.go | 5 +
pkg/management/postgres/initdb.go | 9 +-
pkg/management/postgres/restore.go | 2 +-
10 files changed, 362 insertions(+), 225 deletions(-)
create mode 100644 pkg/management/postgres/archiver/archiver.go
create mode 100644 pkg/management/postgres/archiver/doc.go
diff --git a/go.mod b/go.mod
index 187b8fd06e..2920230af2 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0
- github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2
+ github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
diff --git a/go.sum b/go.sum
index 8c152fd74c..e372961fb6 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5
github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
-github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2 h1:Je4vgmwTN9JIyWzQ4utFw3eQ3eP5sah/d7aS+U9bUhU=
-github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
+github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs=
+github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go
index fef861040f..23756b5cdd 100644
--- a/internal/cmd/manager/walarchive/cmd.go
+++ b/internal/cmd/manager/walarchive/cmd.go
@@ -18,30 +18,16 @@ limitations under the License.
package walarchive
import (
- "context"
"errors"
"fmt"
"os"
- "path"
- "path/filepath"
- "time"
- barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
- "github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
- "github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/spf13/cobra"
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
- "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
- "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
- "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
- pgManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
// errSwitchoverInProgress is raised when there is a switchover in progress
@@ -73,7 +59,7 @@ func NewCmd() *cobra.Command {
return fmt.Errorf("failed to get cluster: %w", errCluster)
}
- if err := run(ctx, podName, pgData, cluster, args); err != nil {
+ if err := archiver.Run(ctx, podName, pgData, cluster, args[0]); err != nil {
if errors.Is(err, errSwitchoverInProgress) {
contextLog.Warning("Refusing to archive WALs until the switchover is not completed",
"err", err)
@@ -98,199 +84,3 @@ func NewCmd() *cobra.Command {
return &cmd
}
-
-func run(
- ctx context.Context,
- podName, pgData string,
- cluster *apiv1.Cluster,
- args []string,
-) error {
- startTime := time.Now()
- contextLog := log.FromContext(ctx)
- walName := args[0]
-
- if cluster.IsReplica() {
- if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary {
- contextLog.Debug("WAL archiving on a replica cluster, "+
- "but this node is not the target primary nor the current one. "+
- "Skipping WAL archiving",
- "walName", walName,
- "currentPrimary", cluster.Status.CurrentPrimary,
- "targetPrimary", cluster.Status.TargetPrimary,
- )
- return nil
- }
- }
-
- if cluster.Status.CurrentPrimary != podName {
- contextLog.Info("Refusing to archive WAL when there is a switchover in progress",
- "currentPrimary", cluster.Status.CurrentPrimary,
- "targetPrimary", cluster.Status.TargetPrimary,
- "podName", podName)
- return errSwitchoverInProgress
- }
-
- // Request the plugins to archive this WAL
- if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil {
- return err
- }
-
- // Request Barman Cloud to archive this WAL
- if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
- // Backup not configured, skipping WAL
- contextLog.Debug("Backup not configured, skip WAL archiving via Barman Cloud",
- "walName", walName,
- "currentPrimary", cluster.Status.CurrentPrimary,
- "targetPrimary", cluster.Status.TargetPrimary,
- )
- return nil
- }
-
- // Get environment from cache
- env, err := cacheClient.GetEnv(cache.WALArchiveKey)
- if err != nil {
- return fmt.Errorf("failed to get envs: %w", err)
- }
-
- maxParallel := 1
- if cluster.Spec.Backup.BarmanObjectStore.Wal != nil {
- maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel
- }
-
- // Create the archiver
- var walArchiver *barmanArchiver.WALArchiver
- if walArchiver, err = barmanArchiver.New(
- ctx,
- env,
- postgres.SpoolDirectory,
- pgData,
- path.Join(pgData, pgManagement.CheckEmptyWalArchiveFile)); err != nil {
- return fmt.Errorf("while creating the archiver: %w", err)
- }
-
- // Step 1: Check if the archive location is safe to perform archiving
- if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) {
- if err := checkWalArchive(ctx, cluster, walArchiver, pgData); err != nil {
- return err
- }
- }
-
- // Step 2: check if this WAL file has not been already archived
- var isDeletedFromSpool bool
- isDeletedFromSpool, err = walArchiver.DeleteFromSpool(walName)
- if err != nil {
- return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err)
- }
- if isDeletedFromSpool {
- contextLog.Info("Archived WAL file (parallel)",
- "walName", walName,
- "currentPrimary", cluster.Status.CurrentPrimary,
- "targetPrimary", cluster.Status.TargetPrimary)
- return nil
- }
-
- // Step 3: gather the WAL files names to archive
- walFilesList := walArchiver.GatherWALFilesToArchive(ctx, walName, maxParallel)
-
- options, err := walArchiver.BarmanCloudWalArchiveOptions(
- ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name)
- if err != nil {
- return err
- }
-
- // Step 5: archive the WAL files in parallel
- uploadStartTime := time.Now()
- walStatus := walArchiver.ArchiveList(ctx, walFilesList, options)
- if len(walStatus) > 1 {
- contextLog.Info("Completed archive command (parallel)",
- "walsCount", len(walStatus),
- "startTime", startTime,
- "uploadStartTime", uploadStartTime,
- "uploadTotalTime", time.Since(uploadStartTime),
- "totalTime", time.Since(startTime))
- }
-
- // We return only the first error to PostgreSQL, because the first error
- // is the one raised by the file that PostgreSQL has requested to archive.
- // The other errors are related to WAL files that were pre-archived as
- // a performance optimization and are just logged
- return walStatus[0].Err
-}
-
-// archiveWALViaPlugins requests every capable plugin to archive the passed
-// WAL file, and returns an error if a configured plugin fails to do so.
-// It will not return an error if there's no plugin capable of WAL archiving
-func archiveWALViaPlugins(
- ctx context.Context,
- cluster *apiv1.Cluster,
- walName string,
-) error {
- contextLogger := log.FromContext(ctx)
-
- plugins := repository.New()
- availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir)
- if err != nil {
- contextLogger.Error(err, "Error while loading local plugins")
- }
- defer plugins.Close()
-
- availablePluginNamesSet := stringset.From(availablePluginNames)
- enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
-
- client, err := pluginClient.WithPlugins(
- ctx,
- plugins,
- availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()...,
- )
- if err != nil {
- contextLogger.Error(err, "Error while loading required plugins")
- return err
- }
- defer client.Close(ctx)
-
- return client.ArchiveWAL(ctx, cluster, walName)
-}
-
-// isCheckWalArchiveFlagFilePresent returns true if the file CheckEmptyWalArchiveFile is present in the PGDATA directory
-func isCheckWalArchiveFlagFilePresent(ctx context.Context, pgDataDirectory string) bool {
- contextLogger := log.FromContext(ctx)
- filePath := filepath.Join(pgDataDirectory, pgManagement.CheckEmptyWalArchiveFile)
-
- exists, err := fileutils.FileExists(filePath)
- if err != nil {
- contextLogger.Error(err, "error while checking for the existence of the CheckEmptyWalArchiveFile")
- }
- // If the check empty wal archive file doesn't exist this it's a no-op
- if !exists {
- contextLogger.Debug("WAL check flag file not found, skipping check")
- return false
- }
-
- return exists
-}
-
-func checkWalArchive(
- ctx context.Context,
- cluster *apiv1.Cluster,
- walArchiver *barmanArchiver.WALArchiver,
- pgData string,
-) error {
- contextLogger := log.FromContext(ctx)
- checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(
- ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name)
- if err != nil {
- contextLogger.Error(err, "while getting barman-cloud-wal-archive options")
- return err
- }
-
- if !isCheckWalArchiveFlagFilePresent(ctx, pgData) {
- return nil
- }
-
- if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil {
- contextLogger.Error(err, "while barman-cloud-check-wal-archive")
- return err
- }
-
- return nil
-}
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index 9a48a4c849..dda32b0920 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -1009,7 +1009,7 @@ func (r *InstanceReconciler) reconcilePostgreSQLAutoConfFilePermissions(ctx cont
// The file is created immediately after initdb and removed after the
// first WAL is archived
func (r *InstanceReconciler) reconcileCheckWalArchiveFile(cluster *apiv1.Cluster) error {
- filePath := filepath.Join(r.instance.PgData, postgresManagement.CheckEmptyWalArchiveFile)
+ filePath := filepath.Join(r.instance.PgData, constants.CheckEmptyWalArchiveFile)
for _, condition := range cluster.Status.Conditions {
// If our current condition is archiving we can delete the file
if condition.Type == string(apiv1.ConditionContinuousArchiving) && condition.Status == metav1.ConditionTrue {
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go
index caeeda9c55..ee81ed483c 100644
--- a/internal/management/controller/instance_startup.go
+++ b/internal/management/controller/instance_startup.go
@@ -32,6 +32,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/controller"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver"
postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
)
@@ -256,6 +257,11 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
err, "Error while changing mode of the postgresql.auto.conf file before pg_rewind, skipped")
}
+ // We archive every WAL that has not been archived since the latest postmaster invocation.
+ if err := archiver.ArchiveAllReadyWALs(ctx, cluster, r.instance.PgData); err != nil {
+ return fmt.Errorf("while ensuring all WAL files are archived: %w", err)
+ }
+
// pg_rewind could require a clean shutdown of the old primary to
// work. Unfortunately, if the old primary is already clean starting
// it up may make it advance in respect to the new one.
diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go
new file mode 100644
index 0000000000..251ca52f5a
--- /dev/null
+++ b/pkg/management/postgres/archiver/archiver.go
@@ -0,0 +1,325 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package archiver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "path"
+ "path/filepath"
+ "time"
+
+ barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
+ "github.com/cloudnative-pg/machinery/pkg/fileutils"
+ walUtils "github.com/cloudnative-pg/machinery/pkg/fileutils/wals"
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
+ cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// errSwitchoverInProgress is raised when there is a switchover in progress
+// and the new primary has not completed the promotion
+var errSwitchoverInProgress = fmt.Errorf("switchover in progress, refusing archiving")
+
+// ArchiveAllReadyWALs ensures that all WAL files that are in the "ready"
+// queue have been archived.
+// This is used to ensure that a former primary will archive the WAL files in
+// its queue even in case of an unclean shutdown.
+func ArchiveAllReadyWALs(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ pgData string,
+) error {
+ contextLog := log.FromContext(ctx)
+
+ noWALLeft := errors.New("no wal files to archive")
+
+ iterator := func() error {
+ walList := walUtils.GatherReadyWALFiles(
+ ctx, walUtils.GatherReadyWALFilesConfig{
+ MaxResults: math.MaxInt32 - 1,
+ PgDataPath: pgData,
+ },
+ )
+
+ if len(walList.Ready) > 0 {
+ contextLog.Info(
+ "Detected ready WAL files in a former primary, triggering WAL archiving",
+ "readyWALCount", len(walList.Ready),
+ )
+ contextLog.Debug(
+ "List of ready WALs",
+ "readyWALs", walList.Ready,
+ )
+ }
+
+ for _, wal := range walList.ReadyItemsToSlice() {
+ if err := internalRun(ctx, pgData, cluster, wal); err != nil {
+ return err
+ }
+
+ if err := walList.MarkAsDone(ctx, wal); err != nil {
+ return err
+ }
+ }
+
+ if !walList.HasMoreResults {
+ return noWALLeft
+ }
+
+ return nil
+ }
+
+ for {
+ if err := iterator(); err != nil {
+ if errors.Is(err, noWALLeft) {
+ return nil
+ }
+ return err
+ }
+ }
+}
+
+// Run implements the WAL archiving process given the current cluster definition
+// and the current Pod Name.
+func Run(
+ ctx context.Context,
+ podName, pgData string,
+ cluster *apiv1.Cluster,
+ walName string,
+) error {
+ contextLog := log.FromContext(ctx)
+
+ if cluster.IsReplica() {
+ if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary {
+ contextLog.Debug("WAL archiving on a replica cluster, "+
+ "but this node is not the target primary nor the current one. "+
+ "Skipping WAL archiving",
+ "walName", walName,
+ "currentPrimary", cluster.Status.CurrentPrimary,
+ "targetPrimary", cluster.Status.TargetPrimary,
+ )
+ return nil
+ }
+ }
+
+ if cluster.Status.CurrentPrimary != podName {
+ contextLog.Info("Refusing to archive WAL when there is a switchover in progress",
+ "currentPrimary", cluster.Status.CurrentPrimary,
+ "targetPrimary", cluster.Status.TargetPrimary,
+ "podName", podName)
+ return errSwitchoverInProgress
+ }
+
+ return internalRun(ctx, pgData, cluster, walName)
+}
+
+func internalRun(
+ ctx context.Context,
+ pgData string,
+ cluster *apiv1.Cluster,
+ walName string,
+) error {
+ contextLog := log.FromContext(ctx)
+ startTime := time.Now()
+
+ // Request the plugins to archive this WAL
+ if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil {
+ return err
+ }
+
+ // Request Barman Cloud to archive this WAL
+ if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
+ // Backup not configured, skipping WAL
+ contextLog.Debug("Backup not configured, skip WAL archiving via Barman Cloud",
+ "walName", walName,
+ "currentPrimary", cluster.Status.CurrentPrimary,
+ "targetPrimary", cluster.Status.TargetPrimary,
+ )
+ return nil
+ }
+
+ // Get environment from cache
+ env, err := cacheClient.GetEnv(cache.WALArchiveKey)
+ if err != nil {
+ return fmt.Errorf("failed to get envs: %w", err)
+ }
+
+ maxParallel := 1
+ if cluster.Spec.Backup.BarmanObjectStore.Wal != nil {
+ maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel
+ }
+
+ // Create the archiver
+ var walArchiver *barmanArchiver.WALArchiver
+ if walArchiver, err = barmanArchiver.New(
+ ctx,
+ env,
+ postgres.SpoolDirectory,
+ pgData,
+ path.Join(pgData, constants.CheckEmptyWalArchiveFile)); err != nil {
+ return fmt.Errorf("while creating the archiver: %w", err)
+ }
+
+ // Step 1: Check if the archive location is safe to perform archiving
+ if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) {
+ if err := checkWalArchive(ctx, cluster, walArchiver, pgData); err != nil {
+ return err
+ }
+ }
+
+ // Step 2: check if this WAL file has not been already archived
+ var isDeletedFromSpool bool
+ isDeletedFromSpool, err = walArchiver.DeleteFromSpool(walName)
+ if err != nil {
+ return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err)
+ }
+ if isDeletedFromSpool {
+ contextLog.Info("Archived WAL file (parallel)",
+ "walName", walName,
+ "currentPrimary", cluster.Status.CurrentPrimary,
+ "targetPrimary", cluster.Status.TargetPrimary)
+ return nil
+ }
+
+ // Step 3: gather the WAL file names to archive
+ walFilesList := walUtils.GatherReadyWALFiles(
+ ctx,
+ walUtils.GatherReadyWALFilesConfig{
+ MaxResults: maxParallel,
+ SkipWALs: []string{walName},
+ PgDataPath: pgData,
+ },
+ )
+
+ // Ensure the requested WAL file is always the first one being
+ // archived
+ walFilesList.Ready = append([]string{walName}, walFilesList.Ready...)
+
+ options, err := walArchiver.BarmanCloudWalArchiveOptions(
+ ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name)
+ if err != nil {
+ return err
+ }
+
+ // Step 4: archive the WAL files in parallel
+ uploadStartTime := time.Now()
+ walStatus := walArchiver.ArchiveList(ctx, walFilesList.ReadyItemsToSlice(), options)
+ if len(walStatus) > 1 {
+ contextLog.Info("Completed archive command (parallel)",
+ "walsCount", len(walStatus),
+ "startTime", startTime,
+ "uploadStartTime", uploadStartTime,
+ "uploadTotalTime", time.Since(uploadStartTime),
+ "totalTime", time.Since(startTime))
+ }
+
+ // We return only the first error to PostgreSQL, because the first error
+ // is the one raised by the file that PostgreSQL has requested to archive.
+ // The other errors are related to WAL files that were pre-archived as
+ // a performance optimization and are just logged
+ return walStatus[0].Err
+}
+
+// archiveWALViaPlugins requests every capable plugin to archive the passed
+// WAL file, and returns an error if a configured plugin fails to do so.
+// It will not return an error if there's no plugin capable of WAL archiving
+func archiveWALViaPlugins(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ walName string,
+) error {
+ contextLogger := log.FromContext(ctx)
+
+ plugins := repository.New()
+ availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir)
+ if err != nil {
+ contextLogger.Error(err, "Error while loading local plugins")
+ }
+ defer plugins.Close()
+
+ availablePluginNamesSet := stringset.From(availablePluginNames)
+ enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
+
+ client, err := pluginClient.WithPlugins(
+ ctx,
+ plugins,
+ availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()...,
+ )
+ if err != nil {
+ contextLogger.Error(err, "Error while loading required plugins")
+ return err
+ }
+ defer client.Close(ctx)
+
+ return client.ArchiveWAL(ctx, cluster, walName)
+}
+
+// isCheckWalArchiveFlagFilePresent returns true if the file CheckEmptyWalArchiveFile is present in the PGDATA directory
+func isCheckWalArchiveFlagFilePresent(ctx context.Context, pgDataDirectory string) bool {
+ contextLogger := log.FromContext(ctx)
+ filePath := filepath.Join(pgDataDirectory, constants.CheckEmptyWalArchiveFile)
+
+ exists, err := fileutils.FileExists(filePath)
+ if err != nil {
+ contextLogger.Error(err, "error while checking for the existence of the CheckEmptyWalArchiveFile")
+ }
+ // If the check-empty-wal-archive flag file doesn't exist, this is a no-op
+ if !exists {
+ contextLogger.Debug("WAL check flag file not found, skipping check")
+ return false
+ }
+
+ return exists
+}
+
+func checkWalArchive(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ walArchiver *barmanArchiver.WALArchiver,
+ pgData string,
+) error {
+ contextLogger := log.FromContext(ctx)
+ checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(
+ ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name)
+ if err != nil {
+ contextLogger.Error(err, "while getting barman-cloud-wal-archive options")
+ return err
+ }
+
+ if !isCheckWalArchiveFlagFilePresent(ctx, pgData) {
+ return nil
+ }
+
+ if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil {
+ contextLogger.Error(err, "while barman-cloud-check-wal-archive")
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/management/postgres/archiver/doc.go b/pkg/management/postgres/archiver/doc.go
new file mode 100644
index 0000000000..e29ef4fe9d
--- /dev/null
+++ b/pkg/management/postgres/archiver/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package archiver contains the logic of the CloudNativePG WAL archiver
+package archiver
diff --git a/pkg/management/postgres/constants/constants.go b/pkg/management/postgres/constants/constants.go
index 51d73ac1f7..5f26ed85f6 100644
--- a/pkg/management/postgres/constants/constants.go
+++ b/pkg/management/postgres/constants/constants.go
@@ -51,4 +51,9 @@ const (
// Startup is the name of a file that is created once during the first reconcile of an instance
Startup = "cnpg_initialized"
+
+ // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that,
+ // if present, requires the WAL archiver to check that the backup object
+ // store is empty.
+ CheckEmptyWalArchiveFile = ".check-empty-wal-archive"
)
diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go
index ee3b1d9ada..cc338d5201 100644
--- a/pkg/management/postgres/initdb.go
+++ b/pkg/management/postgres/initdb.go
@@ -47,13 +47,6 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/system"
)
-const (
- // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that,
- // if present, requires the WAL archiver to check that the backup object
- // store is empty.
- CheckEmptyWalArchiveFile = ".check-empty-wal-archive"
-)
-
// InitInfo contains all the info needed to bootstrap a new PostgreSQL instance
type InitInfo struct {
// The data directory where to generate the new cluster
@@ -339,7 +332,7 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error {
return fmt.Errorf("could not execute post init application SQL refs: %w", err)
}
- filePath := filepath.Join(info.PgData, CheckEmptyWalArchiveFile)
+ filePath := filepath.Join(info.PgData, constants.CheckEmptyWalArchiveFile)
// We create the check empty wal archive file to tell that we should check if the
// destination path it is empty
if err := fileutils.CreateEmptyFile(filepath.Clean(filePath)); err != nil {
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index dc72d03ee7..c1652107c8 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -1005,7 +1005,7 @@ func (info *InitInfo) checkBackupDestination(
env,
postgresSpec.SpoolDirectory,
info.PgData,
- path.Join(info.PgData, CheckEmptyWalArchiveFile))
+ path.Join(info.PgData, constants.CheckEmptyWalArchiveFile))
if err != nil {
return fmt.Errorf("while creating the archiver: %w", err)
}
From 9444ebc5e1863155009c2461173b45d3c766953e Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Tue, 26 Nov 2024 20:42:52 +0100
Subject: [PATCH 164/836] fix: remove spurious log line on walarchive failure
(#6169)
The implementation in #6066 introduced a regression where, in the event
of a WAL archive failure, an additional incorrect error message was
always logged:
"Error while invoking the set WAL archive condition endpoint."
This patch ensures the error messages are logged correctly, without
the misleading additional error.
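The root cause is a common Go pitfall: the `if` statement declares `reqErr`
but then tests the outer `err`, which is always non-nil on this path, so the
endpoint error was logged unconditionally even when the call succeeded. A
minimal, self-contained reproduction with hypothetical names (`notifyStatus`
stands in for the webserver client call):

```go
package main

import (
	"errors"
	"fmt"
)

// notifyStatus stands in for SetWALArchiveStatusCondition; here it succeeds.
func notifyStatus(msg string) error {
	return nil
}

func main() {
	err := errors.New("WAL archive failed") // the archiving error being handled

	// Buggy pattern: reqErr is declared, but the condition tests err.
	// Since err is always non-nil on this path, the branch fires even
	// though notifyStatus succeeded (reqErr == nil).
	if reqErr := notifyStatus(err.Error()); err != nil {
		fmt.Println("spurious log:", reqErr) // prints "spurious log: <nil>"
	}

	// Fixed pattern: test the variable that was just assigned.
	if reqErr := notifyStatus(err.Error()); reqErr != nil {
		fmt.Println("endpoint error:", reqErr) // not reached
	}
}
```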
Signed-off-by: Marco Nenciarini
---
internal/cmd/manager/walarchive/cmd.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go
index 23756b5cdd..2442f32715 100644
--- a/internal/cmd/manager/walarchive/cmd.go
+++ b/internal/cmd/manager/walarchive/cmd.go
@@ -66,7 +66,7 @@ func NewCmd() *cobra.Command {
} else {
contextLog.Error(err, logErrorMessage)
}
- if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); err != nil {
+ if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil {
contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint")
}
return err
From 965393641d75d1bda37eac83e62c1fc56becaaf2 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 15:54:03 +0100
Subject: [PATCH 165/836] fix(deps): update all non-major go dependencies
(main) (#6131)
https://github.com/Masterminds/semver `v3.3.0` -> `v3.3.1`
https://github.com/goreleaser/goreleaser `v2.4.5` -> `v2.4.8`
https://github.com/onsi/ginkgo `v2.21.0` -> `v2.22.0`
https://github.com/onsi/gomega `v1.35.1` -> `v1.36.0`
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
Makefile | 2 +-
go.mod | 6 +++---
go.sum | 12 ++++++------
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/Makefile b/Makefile
index b535b35bb4..170f87025f 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
-GORELEASER_VERSION ?= v2.4.5
+GORELEASER_VERSION ?= v2.4.8
SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.37.0
diff --git a/go.mod b/go.mod
index 2920230af2..f276419c5c 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ toolchain go1.23.3
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
- github.com/Masterminds/semver/v3 v3.3.0
+ github.com/Masterminds/semver/v3 v3.3.1
github.com/avast/retry-go/v4 v4.6.0
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
@@ -25,8 +25,8 @@ require (
github.com/lib/pq v1.10.9
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
- github.com/onsi/ginkgo/v2 v2.21.0
- github.com/onsi/gomega v1.35.1
+ github.com/onsi/ginkgo/v2 v2.22.0
+ github.com/onsi/gomega v1.36.0
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
diff --git a/go.sum b/go.sum
index e372961fb6..317da64330 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
+github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
@@ -146,10 +146,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
+github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
From b656983d9cb50a7a47352bbd5368f570e3a642f4 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 16:33:21 +0100
Subject: [PATCH 166/836] chore(deps): update cloudnative-pg/ciclops action to
v1.3.1 (main) (#6177)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 996673da01..3132f662be 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -2133,7 +2133,7 @@ jobs:
- name: Compute the E2E test summary
id: generate-summary
- uses: cloudnative-pg/ciclops@v1.3.0
+ uses: cloudnative-pg/ciclops@v1.3.1
with:
artifact_directory: test-artifacts/data
From 061752274a0327fddd33cb0783b9a8eb6cb22dd0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 17:48:13 +0100
Subject: [PATCH 167/836] chore(deps): update dependency rook/rook to v1.15.6
(main) (#6158)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 3132f662be..3f7d561e15 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,7 +37,7 @@ env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.25.0"
- ROOK_VERSION: "v1.15.5"
+ ROOK_VERSION: "v1.15.6"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
From e8fb1e742e04ff5808df4eb5dc55d2596459c11c Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 18:13:49 +0100
Subject: [PATCH 168/836] chore(deps): update operator framework to v1.38.0
(main) (#6186)
---
Makefile | 2 +-
config/olm-scorecard/patches/basic.config.yaml | 2 +-
config/olm-scorecard/patches/olm.config.yaml | 10 +++++-----
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/Makefile b/Makefile
index 170f87025f..bf85b798eb 100644
--- a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ CONTROLLER_TOOLS_VERSION ?= v0.16.5
GORELEASER_VERSION ?= v2.4.8
SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
-OPERATOR_SDK_VERSION ?= v1.37.0
+OPERATOR_SDK_VERSION ?= v1.38.0
OPM_VERSION ?= v1.48.0
PREFLIGHT_VERSION ?= 1.10.2
OPENSHIFT_VERSIONS ?= v4.12-v4.17
diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml
index fd6200ae97..84683cf8d7 100644
--- a/config/olm-scorecard/patches/basic.config.yaml
+++ b/config/olm-scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- basic-check-spec
- image: quay.io/operator-framework/scorecard-test:v1.37.0
+ image: quay.io/operator-framework/scorecard-test:v1.38.0
labels:
suite: basic
test: basic-check-spec-test
diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml
index a547ce213d..43f40a8b3f 100644
--- a/config/olm-scorecard/patches/olm.config.yaml
+++ b/config/olm-scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- olm-bundle-validation
- image: quay.io/operator-framework/scorecard-test:v1.37.0
+ image: quay.io/operator-framework/scorecard-test:v1.38.0
labels:
suite: olm
test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-validation
- image: quay.io/operator-framework/scorecard-test:v1.37.0
+ image: quay.io/operator-framework/scorecard-test:v1.38.0
labels:
suite: olm
test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-resources
- image: quay.io/operator-framework/scorecard-test:v1.37.0
+ image: quay.io/operator-framework/scorecard-test:v1.38.0
labels:
suite: olm
test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
entrypoint:
- scorecard-test
- olm-spec-descriptors
- image: quay.io/operator-framework/scorecard-test:v1.37.0
+ image: quay.io/operator-framework/scorecard-test:v1.38.0
labels:
suite: olm
test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
entrypoint:
- scorecard-test
- olm-status-descriptors
- image: quay.io/operator-framework/scorecard-test:v1.37.0
+ image: quay.io/operator-framework/scorecard-test:v1.38.0
labels:
suite: olm
test: olm-status-descriptors-test
From b3b411633fd733f8baec7ea1acf543a29633645c Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 27 Nov 2024 18:25:38 +0100
Subject: [PATCH 169/836] feat: add declarative management of PostgreSQL
logical replication (#5329)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This pull request adds declarative management of PostgreSQL logical
replication by introducing two new Kubernetes CRDs: Publication and
Subscription.
Closes #5567
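For a sense of the new API surface, here is a hedged sketch built with the
types this patch introduces; all concrete names (`source-cluster`, `app`,
`public.orders`, `app_pub`) are illustrative, not taken from the patch. It
maps roughly to `CREATE PUBLICATION app_pub FOR TABLE "public"."orders"` on
the publisher; the equivalent manifest form lives in the
docs/src/samples/publication-example.yaml file added below.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func main() {
	pub := apiv1.Publication{
		ObjectMeta: metav1.ObjectMeta{Name: "app-pub", Namespace: "default"},
		Spec: apiv1.PublicationSpec{
			// The "publisher" cluster and the database hosting the publication
			ClusterRef: corev1.LocalObjectReference{Name: "source-cluster"},
			DBName:     "app",
			// The publication name inside PostgreSQL
			Name: "app_pub",
			// Publish a single table; AllTables and Objects are mutually
			// exclusive, enforced by the CEL rule on PublicationTarget
			Target: apiv1.PublicationTarget{
				Objects: []apiv1.PublicationTargetObject{
					{Table: &apiv1.PublicationTargetTable{Schema: "public", Name: "orders"}},
				},
			},
			// Drop the PostgreSQL publication when the resource is deleted
			ReclaimPolicy: apiv1.PublicationReclaimDelete,
		},
	}
	fmt.Printf("%+v\n", pub)
}
```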
Signed-off-by: Leonardo Cecchi
Signed-off-by: Gabriele Quaresima
Signed-off-by: Niccolò Fei
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Signed-off-by: Gabriele Bartolini
Co-authored-by: Gabriele Quaresima
Co-authored-by: Niccolò Fei
Co-authored-by: Jaime Silvela
Co-authored-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
Co-authored-by: Gabriele Bartolini
---
.wordlist-en-custom.txt | 18 +
PROJECT | 18 +
api/v1/database_funcs.go | 24 +
api/v1/publication_funcs.go | 46 ++
api/v1/publication_types.go | 162 +++++++
api/v1/subscription_funcs.go | 46 ++
api/v1/subscription_types.go | 121 +++++
api/v1/zz_generated.deepcopy.go | 267 +++++++++++
.../postgresql.cnpg.io_publications.yaml | 195 ++++++++
.../postgresql.cnpg.io_subscriptions.yaml | 146 ++++++
config/crd/kustomization.yaml | 14 +
.../cloudnative-pg.clusterserviceversion.yaml | 49 +-
config/olm-samples/kustomization.yaml | 2 +
.../postgresql_v1_publication.yaml | 11 +
.../postgresql_v1_subscription.yaml | 11 +
config/rbac/kustomization.yaml | 4 +
config/rbac/publication_editor_role.yaml | 27 ++
config/rbac/publication_viewer_role.yaml | 23 +
config/rbac/role.yaml | 4 +
config/rbac/subscription_editor_role.yaml | 27 ++
config/rbac/subscription_viewer_role.yaml | 23 +
contribute/e2e_testing_environment/README.md | 1 +
docs/mkdocs.yml | 1 +
docs/src/cloudnative-pg.v1.md | 430 +++++++++++++++++
docs/src/database_import.md | 18 +
docs/src/e2e.md | 1 +
docs/src/index.md | 7 +-
docs/src/logical_replication.md | 444 ++++++++++++++++++
docs/src/operator_capability_levels.md | 9 +
docs/src/replication.md | 8 +-
.../cluster-example-logical-destination.yaml | 20 +-
.../cluster-example-logical-source.yaml | 28 +-
.../samples/publication-example-objects.yaml | 16 +
docs/src/samples/publication-example.yaml | 11 +
docs/src/samples/subscription-example.yaml | 11 +
internal/cmd/manager/instance/run/cmd.go | 24 +
.../logical/publication/create/publication.go | 16 +-
.../publication/create/publication_test.go | 8 +-
internal/controller/cluster_controller.go | 4 +-
internal/controller/finalizers_delete.go | 84 +++-
internal/controller/finalizers_delete_test.go | 169 ++++++-
internal/management/controller/common.go | 111 +++++
.../controller/database_controller.go | 14 +-
internal/management/controller/finalizers.go | 49 ++
internal/management/controller/manager.go | 13 +-
.../controller/publication_controller.go | 178 +++++++
.../controller/publication_controller_sql.go | 193 ++++++++
.../publication_controller_sql_test.go | 225 +++++++++
.../controller/subscription_controller.go | 195 ++++++++
.../controller/subscription_controller_sql.go | 150 ++++++
.../subscription_controller_sql_test.go | 169 +++++++
.../subscription_controller_test.go | 32 ++
pkg/specs/roles.go | 56 +++
pkg/specs/roles_test.go | 2 +-
pkg/utils/finalizers.go | 8 +
.../destination-cluster.yaml.template | 48 ++
.../destination-database.yaml | 9 +
.../e2e/fixtures/declarative_pub_sub/pub.yaml | 11 +
.../source-cluster.yaml.template | 48 ++
.../declarative_pub_sub/source-database.yaml | 9 +
.../e2e/fixtures/declarative_pub_sub/sub.yaml | 11 +
tests/e2e/publication_subscription_test.go | 236 ++++++++++
tests/labels.go | 3 +
63 files changed, 4235 insertions(+), 83 deletions(-)
create mode 100644 api/v1/database_funcs.go
create mode 100644 api/v1/publication_funcs.go
create mode 100644 api/v1/publication_types.go
create mode 100644 api/v1/subscription_funcs.go
create mode 100644 api/v1/subscription_types.go
create mode 100644 config/crd/bases/postgresql.cnpg.io_publications.yaml
create mode 100644 config/crd/bases/postgresql.cnpg.io_subscriptions.yaml
create mode 100644 config/olm-samples/postgresql_v1_publication.yaml
create mode 100644 config/olm-samples/postgresql_v1_subscription.yaml
create mode 100644 config/rbac/publication_editor_role.yaml
create mode 100644 config/rbac/publication_viewer_role.yaml
create mode 100644 config/rbac/subscription_editor_role.yaml
create mode 100644 config/rbac/subscription_viewer_role.yaml
create mode 100644 docs/src/logical_replication.md
create mode 100644 docs/src/samples/publication-example-objects.yaml
create mode 100644 docs/src/samples/publication-example.yaml
create mode 100644 docs/src/samples/subscription-example.yaml
create mode 100644 internal/management/controller/common.go
create mode 100644 internal/management/controller/finalizers.go
create mode 100644 internal/management/controller/publication_controller.go
create mode 100644 internal/management/controller/publication_controller_sql.go
create mode 100644 internal/management/controller/publication_controller_sql_test.go
create mode 100644 internal/management/controller/subscription_controller.go
create mode 100644 internal/management/controller/subscription_controller_sql.go
create mode 100644 internal/management/controller/subscription_controller_sql_test.go
create mode 100644 internal/management/controller/subscription_controller_test.go
create mode 100644 tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template
create mode 100644 tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml
create mode 100644 tests/e2e/fixtures/declarative_pub_sub/pub.yaml
create mode 100644 tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template
create mode 100644 tests/e2e/fixtures/declarative_pub_sub/source-database.yaml
create mode 100644 tests/e2e/fixtures/declarative_pub_sub/sub.yaml
create mode 100644 tests/e2e/publication_subscription_test.go
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 9bc6770ca5..c72a9994a2 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -340,6 +340,13 @@ PrimaryUpdateStrategy
PriorityClass
PriorityClassName
ProjectedVolumeSource
+PublicationReclaimPolicy
+PublicationSpec
+PublicationStatus
+PublicationTarget
+PublicationTargetAllTables
+PublicationTargetObject
+PublicationTargetTable
PullPolicy
QoS
Quaresima
@@ -425,6 +432,9 @@ StatefulSets
StorageClass
StorageConfiguration
Storages
+SubscriptionReclaimPolicy
+SubscriptionSpec
+SubscriptionStatus
SuccessfullyExtracted
SwitchReplicaClusterStatus
SyncReplicaElectionConstraints
@@ -494,6 +504,7 @@ addons
affinityconfiguration
aks
albert
+allTables
allnamespaces
alloc
allocator
@@ -741,6 +752,7 @@ executables
expirations
extensibility
externalCluster
+externalClusterName
externalClusterSecretVersion
externalClusters
externalclusters
@@ -1063,6 +1075,9 @@ promotionTimeout
promotionToken
provisioner
psql
+publicationDBName
+publicationName
+publicationReclaimPolicy
pv
pvc
pvcCount
@@ -1208,6 +1223,7 @@ subcommand
subcommands
subdirectory
subresource
+subscriptionReclaimPolicy
substatement
successfullyExtracted
sudo
@@ -1226,6 +1242,8 @@ syslog
systemd
sysv
tAc
+tableExpression
+tablesInSchema
tablespace
tablespaceClassName
tablespaceMapFile
diff --git a/PROJECT b/PROJECT
index 59c5113ca1..27b49f0be3 100644
--- a/PROJECT
+++ b/PROJECT
@@ -66,3 +66,21 @@ resources:
kind: Database
path: github.com/cloudnative-pg/cloudnative-pg/api/v1
version: v1
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: cnpg.io
+ group: postgresql
+ kind: Publication
+ path: github.com/cloudnative-pg/cloudnative-pg/api/v1
+ version: v1
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: cnpg.io
+ group: postgresql
+ kind: Subscription
+ path: github.com/cloudnative-pg/cloudnative-pg/api/v1
+ version: v1
diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
new file mode 100644
index 0000000000..879d97490c
--- /dev/null
+++ b/api/v1/database_funcs.go
@@ -0,0 +1,24 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import corev1 "k8s.io/api/core/v1"
+
+// GetClusterRef returns the cluster reference of the database
+func (db *Database) GetClusterRef() corev1.LocalObjectReference {
+ return db.Spec.ClusterRef
+}
diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go
new file mode 100644
index 0000000000..e67255b68c
--- /dev/null
+++ b/api/v1/publication_funcs.go
@@ -0,0 +1,46 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/utils/ptr"
+)
+
+// SetAsFailed sets the publication as failed with the given error
+func (pub *Publication) SetAsFailed(err error) {
+ pub.Status.Applied = ptr.To(false)
+ pub.Status.Message = err.Error()
+}
+
+// SetAsUnknown sets the publication as unknown with the given error
+func (pub *Publication) SetAsUnknown(err error) {
+ pub.Status.Applied = nil
+ pub.Status.Message = err.Error()
+}
+
+// SetAsReady sets the publication as working correctly
+func (pub *Publication) SetAsReady() {
+ pub.Status.Applied = ptr.To(true)
+ pub.Status.Message = ""
+ pub.Status.ObservedGeneration = pub.Generation
+}
+
+// GetClusterRef returns the cluster reference of the publication
+func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
+ return pub.Spec.ClusterRef
+}
diff --git a/api/v1/publication_types.go b/api/v1/publication_types.go
new file mode 100644
index 0000000000..39be47ef63
--- /dev/null
+++ b/api/v1/publication_types.go
@@ -0,0 +1,162 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications.
+// +enum
+type PublicationReclaimPolicy string
+
+const (
+ // PublicationReclaimDelete means the publication will be deleted from Kubernetes on release
+ // from its claim.
+ PublicationReclaimDelete PublicationReclaimPolicy = "delete"
+
+ // PublicationReclaimRetain means the publication will be left in its current phase for manual
+ // reclamation by the administrator. The default policy is Retain.
+ PublicationReclaimRetain PublicationReclaimPolicy = "retain"
+)
+
+// PublicationSpec defines the desired state of Publication
+type PublicationSpec struct {
+ // The name of the PostgreSQL cluster that identifies the "publisher"
+ ClusterRef corev1.LocalObjectReference `json:"cluster"`
+
+ // The name of the publication inside PostgreSQL
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
+ Name string `json:"name"`
+
+ // The name of the database where the publication will be installed in
+ // the "publisher" cluster
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable"
+ DBName string `json:"dbname"`
+
+ // Publication parameters part of the `WITH` clause as expected by
+ // PostgreSQL `CREATE PUBLICATION` command
+ // +optional
+ Parameters map[string]string `json:"parameters,omitempty"`
+
+ // Target of the publication as expected by PostgreSQL `CREATE PUBLICATION` command
+ Target PublicationTarget `json:"target"`
+
+ // The policy for end-of-life maintenance of this publication
+ // +kubebuilder:validation:Enum=delete;retain
+ // +kubebuilder:default:=retain
+ // +optional
+ ReclaimPolicy PublicationReclaimPolicy `json:"publicationReclaimPolicy,omitempty"`
+}
+
+// PublicationTarget is what this publication should publish
+// +kubebuilder:validation:XValidation:rule="(has(self.allTables) && !has(self.objects)) || (!has(self.allTables) && has(self.objects))",message="allTables and objects are mutually exclusive"
+type PublicationTarget struct {
+ // Marks the publication as one that replicates changes for all tables
+ // in the database, including tables created in the future.
+ // Corresponding to `FOR ALL TABLES` in PostgreSQL.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="allTables is immutable"
+ // +optional
+ AllTables bool `json:"allTables,omitempty"`
+
+ // Just the following schema objects
+ // +kubebuilder:validation:XValidation:rule="!(self.exists(o, has(o.table) && has(o.table.columns)) && self.exists(o, has(o.tablesInSchema)))",message="specifying a column list when the publication also publishes tablesInSchema is not supported"
+ // +kubebuilder:validation:MaxItems=100000
+ // +optional
+ Objects []PublicationTargetObject `json:"objects,omitempty"`
+}
+
+// PublicationTargetObject is an object to publish
+// +kubebuilder:validation:XValidation:rule="(has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) && has(self.table))",message="tablesInSchema and table are mutually exclusive"
+type PublicationTargetObject struct {
+ // Marks the publication as one that replicates changes for all tables
+ // in the specified list of schemas, including tables created in the
+ // future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL.
+ // +optional
+ TablesInSchema string `json:"tablesInSchema,omitempty"`
+
+ // Specifies a table to add to the publication. Corresponding
+ // to `FOR TABLE` in PostgreSQL.
+ // +optional
+ Table *PublicationTargetTable `json:"table,omitempty"`
+}
+
+// PublicationTargetTable is a table to publish
+type PublicationTargetTable struct {
+ // Whether to limit to the table only or include all its descendants
+ // +optional
+ Only bool `json:"only,omitempty"`
+
+ // The table name
+ Name string `json:"name"`
+
+ // The schema name
+ // +optional
+ Schema string `json:"schema,omitempty"`
+
+ // The columns to publish
+ // +optional
+ Columns []string `json:"columns,omitempty"`
+}
+
+// PublicationStatus defines the observed state of Publication
+type PublicationStatus struct {
+ // A sequence number representing the latest
+ // desired state that was synchronized
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Applied is true if the publication was reconciled correctly
+ // +optional
+ Applied *bool `json:"applied,omitempty"`
+
+ // Message is the reconciliation output message
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
+// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message"
+
+// Publication is the Schema for the publications API
+type Publication struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec PublicationSpec `json:"spec"`
+ Status PublicationStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PublicationList contains a list of Publication
+type PublicationList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Publication `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Publication{}, &PublicationList{})
+}
diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go
new file mode 100644
index 0000000000..49a418bdae
--- /dev/null
+++ b/api/v1/subscription_funcs.go
@@ -0,0 +1,46 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/utils/ptr"
+)
+
+// SetAsFailed sets the subscription as failed with the given error
+func (sub *Subscription) SetAsFailed(err error) {
+ sub.Status.Applied = ptr.To(false)
+ sub.Status.Message = err.Error()
+}
+
+// SetAsUnknown sets the subscription as unknown with the given error
+func (sub *Subscription) SetAsUnknown(err error) {
+ sub.Status.Applied = nil
+ sub.Status.Message = err.Error()
+}
+
+// SetAsReady sets the subscription as working correctly
+func (sub *Subscription) SetAsReady() {
+ sub.Status.Applied = ptr.To(true)
+ sub.Status.Message = ""
+ sub.Status.ObservedGeneration = sub.Generation
+}
+
+// GetClusterRef returns the cluster reference of the subscription
+func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference {
+ return sub.Spec.ClusterRef
+}
diff --git a/api/v1/subscription_types.go b/api/v1/subscription_types.go
new file mode 100644
index 0000000000..628ec8a4da
--- /dev/null
+++ b/api/v1/subscription_types.go
@@ -0,0 +1,121 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions.
+// +enum
+type SubscriptionReclaimPolicy string
+
+const (
+ // SubscriptionReclaimDelete means the subscription will be deleted from Kubernetes on release
+ // from its claim.
+ SubscriptionReclaimDelete SubscriptionReclaimPolicy = "delete"
+
+ // SubscriptionReclaimRetain means the subscription will be left in its current phase for manual
+ // reclamation by the administrator. The default policy is Retain.
+ SubscriptionReclaimRetain SubscriptionReclaimPolicy = "retain"
+)
+
+// SubscriptionSpec defines the desired state of Subscription
+type SubscriptionSpec struct {
+ // The name of the PostgreSQL cluster that identifies the "subscriber"
+ ClusterRef corev1.LocalObjectReference `json:"cluster"`
+
+ // The name of the subscription inside PostgreSQL
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
+ Name string `json:"name"`
+
+	// The name of the database where the subscription will be installed in
+	// the "subscriber" cluster
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable"
+ DBName string `json:"dbname"`
+
+ // Subscription parameters part of the `WITH` clause as expected by
+ // PostgreSQL `CREATE SUBSCRIPTION` command
+ // +optional
+ Parameters map[string]string `json:"parameters,omitempty"`
+
+ // The name of the publication inside the PostgreSQL database in the
+ // "publisher"
+ PublicationName string `json:"publicationName"`
+
+ // The name of the database containing the publication on the external
+ // cluster. Defaults to the one in the external cluster definition.
+ // +optional
+ PublicationDBName string `json:"publicationDBName,omitempty"`
+
+ // The name of the external cluster with the publication ("publisher")
+ ExternalClusterName string `json:"externalClusterName"`
+
+ // The policy for end-of-life maintenance of this subscription
+ // +kubebuilder:validation:Enum=delete;retain
+ // +kubebuilder:default:=retain
+ // +optional
+ ReclaimPolicy SubscriptionReclaimPolicy `json:"subscriptionReclaimPolicy,omitempty"`
+}
+
+// SubscriptionStatus defines the observed state of Subscription
+type SubscriptionStatus struct {
+ // A sequence number representing the latest
+ // desired state that was synchronized
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Applied is true if the subscription was reconciled correctly
+ // +optional
+ Applied *bool `json:"applied,omitempty"`
+
+ // Message is the reconciliation output message
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
+// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message"
+
+// Subscription is the Schema for the subscriptions API
+type Subscription struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec SubscriptionSpec `json:"spec"`
+ Status SubscriptionStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SubscriptionList contains a list of Subscription
+type SubscriptionList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Subscription `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Subscription{}, &SubscriptionList{})
+}
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 47e5083fc9..0c367fc416 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -2201,6 +2201,171 @@ func (in *PostgresConfiguration) DeepCopy() *PostgresConfiguration {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Publication) DeepCopyInto(out *Publication) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Publication.
+func (in *Publication) DeepCopy() *Publication {
+ if in == nil {
+ return nil
+ }
+ out := new(Publication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Publication) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicationList) DeepCopyInto(out *PublicationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Publication, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationList.
+func (in *PublicationList) DeepCopy() *PublicationList {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PublicationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicationSpec) DeepCopyInto(out *PublicationSpec) {
+ *out = *in
+ out.ClusterRef = in.ClusterRef
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Target.DeepCopyInto(&out.Target)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationSpec.
+func (in *PublicationSpec) DeepCopy() *PublicationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicationStatus) DeepCopyInto(out *PublicationStatus) {
+ *out = *in
+ if in.Applied != nil {
+ in, out := &in.Applied, &out.Applied
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationStatus.
+func (in *PublicationStatus) DeepCopy() *PublicationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicationTarget) DeepCopyInto(out *PublicationTarget) {
+ *out = *in
+ if in.Objects != nil {
+ in, out := &in.Objects, &out.Objects
+ *out = make([]PublicationTargetObject, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTarget.
+func (in *PublicationTarget) DeepCopy() *PublicationTarget {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicationTarget)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicationTargetObject) DeepCopyInto(out *PublicationTargetObject) {
+ *out = *in
+ if in.Table != nil {
+ in, out := &in.Table, &out.Table
+ *out = new(PublicationTargetTable)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTargetObject.
+func (in *PublicationTargetObject) DeepCopy() *PublicationTargetObject {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicationTargetObject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicationTargetTable) DeepCopyInto(out *PublicationTargetTable) {
+ *out = *in
+ if in.Columns != nil {
+ in, out := &in.Columns, &out.Columns
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTargetTable.
+func (in *PublicationTargetTable) DeepCopy() *PublicationTargetTable {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicationTargetTable)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecoveryTarget) DeepCopyInto(out *RecoveryTarget) {
*out = *in
@@ -2612,6 +2777,108 @@ func (in *StorageConfiguration) DeepCopy() *StorageConfiguration {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subscription) DeepCopyInto(out *Subscription) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription.
+func (in *Subscription) DeepCopy() *Subscription {
+ if in == nil {
+ return nil
+ }
+ out := new(Subscription)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Subscription) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Subscription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList.
+func (in *SubscriptionList) DeepCopy() *SubscriptionList {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubscriptionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) {
+ *out = *in
+ out.ClusterRef = in.ClusterRef
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec.
+func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) {
+ *out = *in
+ if in.Applied != nil {
+ in, out := &in.Applied, &out.Applied
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus.
+func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SwitchReplicaClusterStatus) DeepCopyInto(out *SwitchReplicaClusterStatus) {
*out = *in
diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml
new file mode 100644
index 0000000000..2e0fdaf0e9
--- /dev/null
+++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml
@@ -0,0 +1,195 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: publications.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Publication
+ listKind: PublicationList
+ plural: publications
+ singular: publication
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Publication is the Schema for the publications API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PublicationSpec defines the desired state of Publication
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "publisher"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the publication will be installed in
+ the "publisher" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ name:
+ description: The name of the publication inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Publication parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE PUBLICATION` command
+ type: object
+ publicationReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this publication
+ enum:
+ - delete
+ - retain
+ type: string
+ target:
+ description: Target of the publication as expected by PostgreSQL `CREATE
+ PUBLICATION` command
+ properties:
+ allTables:
+ description: |-
+ Marks the publication as one that replicates changes for all tables
+ in the database, including tables created in the future.
+ Corresponding to `FOR ALL TABLES` in PostgreSQL.
+ type: boolean
+ x-kubernetes-validations:
+ - message: allTables is immutable
+ rule: self == oldSelf
+ objects:
+ description: Just the following schema objects
+ items:
+ description: PublicationTargetObject is an object to publish
+ properties:
+ table:
+ description: |-
+ Specifies a list of tables to add to the publication. Corresponding
+ to `FOR TABLE` in PostgreSQL.
+ properties:
+ columns:
+ description: The columns to publish
+ items:
+ type: string
+ type: array
+ name:
+ description: The table name
+ type: string
+ only:
+ description: Whether to limit to the table only or include
+ all its descendants
+ type: boolean
+ schema:
+ description: The schema name
+ type: string
+ required:
+ - name
+ type: object
+ tablesInSchema:
+ description: |-
+ Marks the publication as one that replicates changes for all tables
+ in the specified list of schemas, including tables created in the
+ future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL.
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: tablesInSchema and table are mutually exclusive
+ rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema)
+ && has(self.table))
+ maxItems: 100000
+ type: array
+ x-kubernetes-validations:
+ - message: specifying a column list when the publication also
+ publishes tablesInSchema is not supported
+ rule: '!(self.exists(o, has(o.table) && has(o.table.columns))
+ && self.exists(o, has(o.tablesInSchema)))'
+ type: object
+ x-kubernetes-validations:
+ - message: allTables and objects are mutually exclusive
+ rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables)
+ && has(self.objects))
+ required:
+ - cluster
+ - dbname
+ - name
+ - target
+ type: object
+ status:
+ description: PublicationStatus defines the observed state of Publication
+ properties:
+ applied:
+ description: Applied is true if the publication was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml
new file mode 100644
index 0000000000..24a9ff12a1
--- /dev/null
+++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml
@@ -0,0 +1,146 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+                The name of the database where the subscription will be installed in
+                the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE SUBSCRIPTION` command
+ type: object
+ publicationDBName:
+ description: |-
+ The name of the database containing the publication on the external
+ cluster. Defaults to the one in the external cluster definition.
+ type: string
+ publicationName:
+ description: |-
+ The name of the publication inside the PostgreSQL database in the
+ "publisher"
+ type: string
+ subscriptionReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this subscription
+ enum:
+ - delete
+ - retain
+ type: string
+ required:
+ - cluster
+ - dbname
+ - externalClusterName
+ - name
+ - publicationName
+ type: object
+ status:
+ description: SubscriptionStatus defines the observed state of Subscription
+ properties:
+ applied:
+ description: Applied is true if the subscription was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 5e4757d42c..6100960f12 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -11,6 +11,9 @@ resources:
- bases/postgresql.cnpg.io_imagecatalogs.yaml
- bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
- bases/postgresql.cnpg.io_databases.yaml
+- bases/postgresql.cnpg.io_publications.yaml
+- bases/postgresql.cnpg.io_subscriptions.yaml
+
# +kubebuilder:scaffold:crdkustomizeresource
patches:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
@@ -52,6 +55,17 @@ patches:
# kind: CustomResourceDefinition
# name: poolers.postgresql.cnpg.io
#- path: patches/cainjection_in_databases.yaml
+# target:
+# kind: CustomResourceDefinition
+# name: databases.postgresql.cnpg.io
+#- path: patches/cainjection_in_publications.yaml
+# target:
+# kind: CustomResourceDefinition
+# name: publications.postgresql.cnpg.io
+#- path: patches/cainjection_in_subscriptions.yaml
+# target:
+# kind: CustomResourceDefinition
+# name: subscriptions.postgresql.cnpg.io
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
index 6f7a2108f5..0bf3485944 100644
--- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
+++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
@@ -688,7 +688,7 @@ spec:
specDescriptors:
- path: databaseReclaimPolicy
displayName: Database reclaim policy
- description: Database reclame policy
+ description: Database reclaim policy
- path: cluster
displayName: Cluster requested to create the database
description: Cluster requested to create the database
@@ -698,3 +698,50 @@ spec:
- path: owner
displayName: Database Owner
description: Database Owner
+ - kind: Publication
+ name: publications.postgresql.cnpg.io
+ displayName: Publication
+ description: Declarative publication
+ version: v1
+ resources:
+ - kind: Cluster
+ name: ''
+ version: v1
+ specDescriptors:
+ - path: name
+ displayName: Publication name
+ description: Publication name
+ - path: dbname
+ displayName: Database name
+ description: Database name
+ - path: cluster
+ displayName: Cluster requested to create the publication
+ description: Cluster requested to create the publication
+ - path: target
+ displayName: Publication target
+ description: Publication target
+ - kind: Subscription
+ name: subscriptions.postgresql.cnpg.io
+ displayName: Subscription
+ description: Declarative subscription
+ version: v1
+ resources:
+ - kind: Cluster
+ name: ''
+ version: v1
+ specDescriptors:
+ - path: name
+ displayName: Subscription name
+ description: Subscription name
+ - path: dbname
+ displayName: Database name
+ description: Database name
+ - path: publicationName
+ displayName: Publication name
+ description: Publication name
+ - path: cluster
+ displayName: Cluster requested to create the subscription
+ description: Cluster requested to create the subscription
+ - path: externalClusterName
+ displayName: Name of the external cluster with publication
+ description: Name of the external cluster with publication
diff --git a/config/olm-samples/kustomization.yaml b/config/olm-samples/kustomization.yaml
index 205a50a544..6bb494f569 100644
--- a/config/olm-samples/kustomization.yaml
+++ b/config/olm-samples/kustomization.yaml
@@ -6,3 +6,5 @@ resources:
- postgresql_v1_imagecatalog.yaml
- postgresql_v1_clusterimagecatalog.yaml
- postgresql_v1_database.yaml
+- postgresql_v1_publication.yaml
+- postgresql_v1_subscription.yaml
diff --git a/config/olm-samples/postgresql_v1_publication.yaml b/config/olm-samples/postgresql_v1_publication.yaml
new file mode 100644
index 0000000000..598c02a2bb
--- /dev/null
+++ b/config/olm-samples/postgresql_v1_publication.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: publication-sample
+spec:
+ name: pub
+ dbname: app
+ cluster:
+ name: cluster-sample
+ target:
+ allTables: true
diff --git a/config/olm-samples/postgresql_v1_subscription.yaml b/config/olm-samples/postgresql_v1_subscription.yaml
new file mode 100644
index 0000000000..ecc016619b
--- /dev/null
+++ b/config/olm-samples/postgresql_v1_subscription.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: subscription-sample
+spec:
+ name: sub
+ dbname: app
+ publicationName: pub
+ cluster:
+ name: cluster-sample-dest
+ externalClusterName: cluster-sample
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
index 99493b37c4..a561c73dc9 100644
--- a/config/rbac/kustomization.yaml
+++ b/config/rbac/kustomization.yaml
@@ -14,6 +14,10 @@ resources:
# default, aiding admins in cluster management. Those roles are
# not used by the Project itself. You can comment the following lines
# if you do not want those helpers be installed with your Project.
+- subscription_editor_role.yaml
+- subscription_viewer_role.yaml
+- publication_editor_role.yaml
+- publication_viewer_role.yaml
- database_editor_role.yaml
- database_viewer_role.yaml
diff --git a/config/rbac/publication_editor_role.yaml b/config/rbac/publication_editor_role.yaml
new file mode 100644
index 0000000000..f741900fa3
--- /dev/null
+++ b/config/rbac/publication_editor_role.yaml
@@ -0,0 +1,27 @@
+# permissions for end users to edit publications.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ app.kubernetes.io/managed-by: kustomize
+ name: publication-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications/status
+ verbs:
+ - get
diff --git a/config/rbac/publication_viewer_role.yaml b/config/rbac/publication_viewer_role.yaml
new file mode 100644
index 0000000000..32e84f531f
--- /dev/null
+++ b/config/rbac/publication_viewer_role.yaml
@@ -0,0 +1,23 @@
+# permissions for end users to view publications.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ app.kubernetes.io/managed-by: kustomize
+ name: publication-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications/status
+ verbs:
+ - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index ce1e7ded88..f47a568f0d 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -141,7 +141,9 @@ rules:
- clusters
- databases
- poolers
+ - publications
- scheduledbackups
+ - subscriptions
verbs:
- create
- delete
@@ -155,7 +157,9 @@ rules:
resources:
- backups/status
- databases/status
+ - publications/status
- scheduledbackups/status
+ - subscriptions/status
verbs:
- get
- patch
diff --git a/config/rbac/subscription_editor_role.yaml b/config/rbac/subscription_editor_role.yaml
new file mode 100644
index 0000000000..066b1c494d
--- /dev/null
+++ b/config/rbac/subscription_editor_role.yaml
@@ -0,0 +1,27 @@
+# permissions for end users to edit subscriptions.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ app.kubernetes.io/managed-by: kustomize
+ name: subscription-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions/status
+ verbs:
+ - get
diff --git a/config/rbac/subscription_viewer_role.yaml b/config/rbac/subscription_viewer_role.yaml
new file mode 100644
index 0000000000..4cf8ff0d06
--- /dev/null
+++ b/config/rbac/subscription_viewer_role.yaml
@@ -0,0 +1,23 @@
+# permissions for end users to view subscriptions.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ app.kubernetes.io/managed-by: kustomize
+ name: subscription-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions/status
+ verbs:
+ - get
diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md
index 30a41ddaf4..dd956ab464 100644
--- a/contribute/e2e_testing_environment/README.md
+++ b/contribute/e2e_testing_environment/README.md
@@ -206,6 +206,7 @@ exported, it will select all medium test cases from the feature type provided.
| `security` |
| `maintenance` |
| `tablespaces` |
+| `publication-subscription` |
| `declarative-databases` |
ex:
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index b9808c650d..2d50e97023 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -31,6 +31,7 @@ nav:
- failure_modes.md
- rolling_update.md
- replication.md
+ - logical_replication.md
- backup.md
- backup_barmanobjectstore.md
- wal_archiving.md
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 0868475df7..c323ced382 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -12,7 +12,9 @@
- [Database](#postgresql-cnpg-io-v1-Database)
- [ImageCatalog](#postgresql-cnpg-io-v1-ImageCatalog)
- [Pooler](#postgresql-cnpg-io-v1-Pooler)
+- [Publication](#postgresql-cnpg-io-v1-Publication)
- [ScheduledBackup](#postgresql-cnpg-io-v1-ScheduledBackup)
+- [Subscription](#postgresql-cnpg-io-v1-Subscription)
## Backup {#postgresql-cnpg-io-v1-Backup}
@@ -224,6 +226,42 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
+## Publication {#postgresql-cnpg-io-v1-Publication}
+
+
+**Appears in:**
+
+
+
+Publication is the Schema for the publications API
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` [Required] (`string`) | `postgresql.cnpg.io/v1` |
+| `kind` [Required] (`string`) | `Publication` |
+| `metadata` [Required] (`meta/v1.ObjectMeta`) | No description provided. Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
+| `spec` [Required] ([`PublicationSpec`](#postgresql-cnpg-io-v1-PublicationSpec)) | No description provided. |
+| `status` [Required] ([`PublicationStatus`](#postgresql-cnpg-io-v1-PublicationStatus)) | No description provided. |
+
+
+
+
## ScheduledBackup {#postgresql-cnpg-io-v1-ScheduledBackup}
@@ -262,6 +300,42 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
+## Subscription {#postgresql-cnpg-io-v1-Subscription}
+
+
+**Appears in:**
+
+
+
+Subscription is the Schema for the subscriptions API
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` [Required] (`string`) | `postgresql.cnpg.io/v1` |
+| `kind` [Required] (`string`) | `Subscription` |
+| `metadata` [Required] (`meta/v1.ObjectMeta`) | No description provided. Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
+| `spec` [Required] ([`SubscriptionSpec`](#postgresql-cnpg-io-v1-SubscriptionSpec)) | No description provided. |
+| `status` [Required] ([`SubscriptionStatus`](#postgresql-cnpg-io-v1-SubscriptionStatus)) | No description provided. |
+
+
+
+
## AffinityConfiguration {#postgresql-cnpg-io-v1-AffinityConfiguration}
@@ -3955,6 +4029,232 @@ the primary server of the cluster as part of rolling updates
+## PublicationReclaimPolicy {#postgresql-cnpg-io-v1-PublicationReclaimPolicy}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [PublicationSpec](#postgresql-cnpg-io-v1-PublicationSpec)
+
+
+PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications.
+
+
+
+
+## PublicationSpec {#postgresql-cnpg-io-v1-PublicationSpec}
+
+
+**Appears in:**
+
+- [Publication](#postgresql-cnpg-io-v1-Publication)
+
+
+PublicationSpec defines the desired state of Publication
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `cluster` [Required] (`core/v1.LocalObjectReference`) | The name of the PostgreSQL cluster that identifies the "publisher" |
+| `name` [Required] (`string`) | The name of the publication inside PostgreSQL |
+| `dbname` [Required] (`string`) | The name of the database where the publication will be installed in the "publisher" cluster |
+| `parameters` (`map[string]string`) | Publication parameters part of the `WITH` clause as expected by PostgreSQL `CREATE PUBLICATION` command |
+| `target` [Required] ([`PublicationTarget`](#postgresql-cnpg-io-v1-PublicationTarget)) | Target of the publication as expected by PostgreSQL `CREATE PUBLICATION` command |
+| `publicationReclaimPolicy` ([`PublicationReclaimPolicy`](#postgresql-cnpg-io-v1-PublicationReclaimPolicy)) | The policy for end-of-life maintenance of this publication |
+
+
+
+
+
+## PublicationStatus {#postgresql-cnpg-io-v1-PublicationStatus}
+
+
+**Appears in:**
+
+- [Publication](#postgresql-cnpg-io-v1-Publication)
+
+
+PublicationStatus defines the observed state of Publication
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `observedGeneration` (`int64`) | A sequence number representing the latest desired state that was synchronized |
+| `applied` (`bool`) | Applied is true if the publication was reconciled correctly |
+| `message` (`string`) | Message is the reconciliation output message |
+
+
+
+
+
+## PublicationTarget {#postgresql-cnpg-io-v1-PublicationTarget}
+
+
+**Appears in:**
+
+- [PublicationSpec](#postgresql-cnpg-io-v1-PublicationSpec)
+
+
+PublicationTarget is what this publication should publish
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `allTables` (`bool`) | Marks the publication as one that replicates changes for all tables in the database, including tables created in the future. Corresponding to `FOR ALL TABLES` in PostgreSQL. |
+| `objects` ([`[]PublicationTargetObject`](#postgresql-cnpg-io-v1-PublicationTargetObject)) | Just the following schema objects |
+
+
+
+
+
+## PublicationTargetObject {#postgresql-cnpg-io-v1-PublicationTargetObject}
+
+
+**Appears in:**
+
+- [PublicationTarget](#postgresql-cnpg-io-v1-PublicationTarget)
+
+
+PublicationTargetObject is an object to publish
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `tablesInSchema` (`string`) | Marks the publication as one that replicates changes for all tables in the specified list of schemas, including tables created in the future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. |
+| `table` ([`PublicationTargetTable`](#postgresql-cnpg-io-v1-PublicationTargetTable)) | Specifies a list of tables to add to the publication. Corresponding to `FOR TABLE` in PostgreSQL. |
+
+
+
+
+
+## PublicationTargetTable {#postgresql-cnpg-io-v1-PublicationTargetTable}
+
+
+**Appears in:**
+
+- [PublicationTargetObject](#postgresql-cnpg-io-v1-PublicationTargetObject)
+
+
+PublicationTargetTable is a table to publish
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `only` (`bool`) | Whether to limit to the table only or include all its descendants |
+| `name` [Required] (`string`) | The table name |
+| `schema` (`string`) | The schema name |
+| `columns` (`[]string`) | The columns to publish |
+
+
+
+
+
## RecoveryTarget {#postgresql-cnpg-io-v1-RecoveryTarget}
@@ -4817,6 +5117,136 @@ Size cannot be decreased.
+## SubscriptionReclaimPolicy {#postgresql-cnpg-io-v1-SubscriptionReclaimPolicy}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [SubscriptionSpec](#postgresql-cnpg-io-v1-SubscriptionSpec)
+
+
+SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions.
+
+
+
+
+## SubscriptionSpec {#postgresql-cnpg-io-v1-SubscriptionSpec}
+
+
+**Appears in:**
+
+- [Subscription](#postgresql-cnpg-io-v1-Subscription)
+
+
+SubscriptionSpec defines the desired state of Subscription
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `cluster` [Required] (`core/v1.LocalObjectReference`) | The name of the PostgreSQL cluster that identifies the "subscriber" |
+| `name` [Required] (`string`) | The name of the subscription inside PostgreSQL |
+| `dbname` [Required] (`string`) | The name of the database where the subscription will be installed in the "subscriber" cluster |
+| `parameters` (`map[string]string`) | Subscription parameters part of the `WITH` clause as expected by PostgreSQL `CREATE SUBSCRIPTION` command |
+| `publicationName` [Required] (`string`) | The name of the publication inside the PostgreSQL database in the "publisher" |
+| `publicationDBName` (`string`) | The name of the database containing the publication on the external cluster. Defaults to the one in the external cluster definition. |
+| `externalClusterName` [Required] (`string`) | The name of the external cluster with the publication ("publisher") |
+| `subscriptionReclaimPolicy` ([`SubscriptionReclaimPolicy`](#postgresql-cnpg-io-v1-SubscriptionReclaimPolicy)) | The policy for end-of-life maintenance of this subscription |
+
+
+
+
+
+## SubscriptionStatus {#postgresql-cnpg-io-v1-SubscriptionStatus}
+
+
+**Appears in:**
+
+- [Subscription](#postgresql-cnpg-io-v1-Subscription)
+
+
+SubscriptionStatus defines the observed state of Subscription
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `observedGeneration` (`int64`) | A sequence number representing the latest desired state that was synchronized |
+| `applied` (`bool`) | Applied is true if the subscription was reconciled correctly |
+| `message` (`string`) | Message is the reconciliation output message |
+
+
+
+
+
## SwitchReplicaClusterStatus {#postgresql-cnpg-io-v1-SwitchReplicaClusterStatus}
diff --git a/docs/src/database_import.md b/docs/src/database_import.md
index f8bba32c4a..3308b5f6f1 100644
--- a/docs/src/database_import.md
+++ b/docs/src/database_import.md
@@ -267,3 +267,21 @@ topic is beyond the scope of CloudNativePG, we recommend that you reduce
unnecessary writes in the checkpoint area by tuning Postgres GUCs like
`shared_buffers`, `max_wal_size`, `checkpoint_timeout` directly in the
`Cluster` configuration.
+
+## Online Import and Upgrades
+
+Logical replication offers a powerful way to import any PostgreSQL database
+accessible over the network using the following approach:
+
+- **Import Bootstrap with Schema-Only Option**: Initialize the schema in the
+ target database before replication begins.
+- **`Subscription` Resource**: Set up continuous replication to synchronize
+ data changes.
+
+This technique can also be leveraged for performing major PostgreSQL upgrades
+with minimal downtime, making it ideal for seamless migrations and system
+upgrades.
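+
+As a minimal sketch, assuming the source database is reachable through an
+external cluster named `source-db`, that it already exposes a publication
+named `migration`, and that the `schemaOnly` import option is available in
+your CloudNativePG version (all names here are illustrative), the two steps
+could be combined as follows:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: target
+spec:
+  instances: 1
+  storage:
+    size: 1Gi
+  bootstrap:
+    initdb:
+      import:
+        type: microservice
+        # Copy only the schema; data flows in via the Subscription below
+        schemaOnly: true
+        databases:
+          - app
+        source:
+          externalCluster: source-db
+  externalClusters:
+    - name: source-db
+      connectionParameters:
+        host: source.example.com
+        user: postgres
+        dbname: app
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+  name: target-import
+spec:
+  cluster:
+    name: target
+  dbname: app
+  name: importer
+  externalClusterName: source-db
+  publicationName: migration
+```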
+
+For more details, including limitations and best practices, refer to the
+[Logical Replication](logical_replication.md) section in the documentation.
+
diff --git a/docs/src/e2e.md b/docs/src/e2e.md
index e796db13b6..de06101da5 100644
--- a/docs/src/e2e.md
+++ b/docs/src/e2e.md
@@ -60,6 +60,7 @@ and the following suite of E2E tests are performed on that cluster:
* Replication Slots
* Synchronous replication
* Scale-up and scale-down of a Cluster
+ * Logical replication via declarative Publication / Subscription
* **Replica clusters**
* Bootstrapping a replica cluster from backup
diff --git a/docs/src/index.md b/docs/src/index.md
index 815dc0af85..06dec9712e 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -96,12 +96,15 @@ Additionally, the community provides images for the [PostGIS extension](postgis.
* In-place or rolling updates for operator upgrades
* TLS connections and client certificate authentication
* Support for custom TLS certificates (including integration with cert-manager)
-* Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage)
+* Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure
+ Blob Storage, and Google Cloud Storage)
* Backups on volume snapshots (where supported by the underlying storage classes)
* Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage)
* Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores
* Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL
-* Online import of existing PostgreSQL databases, including major upgrades of PostgreSQL, through PostgreSQL native logical replication (imperative, via the `cnpg` plugin)
+* Online import of existing PostgreSQL databases, including major upgrades of
+ PostgreSQL, through PostgreSQL native logical replication (declarative, via
+ the `Subscription` resource)
* Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way
* Hibernation of a PostgreSQL cluster in a declarative way
* Support for quorum-based and priority-based Synchronous Replication
diff --git a/docs/src/logical_replication.md b/docs/src/logical_replication.md
new file mode 100644
index 0000000000..345dfe0cae
--- /dev/null
+++ b/docs/src/logical_replication.md
@@ -0,0 +1,444 @@
+# Logical Replication
+
+PostgreSQL extends its replication capabilities beyond physical replication,
+which operates at the level of exact block addresses and byte-by-byte copying,
+by offering [logical replication](https://www.postgresql.org/docs/current/logical-replication.html).
+Logical replication replicates data objects and their changes based on a
+defined replication identity, typically the primary key.
+
+Logical replication uses a publish-and-subscribe model, where subscribers
+connect to publications on a publisher node. Subscribers pull data changes from
+these publications and can re-publish them, enabling cascading replication and
+complex topologies.
+
+This flexible model is particularly useful for:
+
+- Online data migrations
+- Live PostgreSQL version upgrades
+- Data distribution across systems
+- Real-time analytics
+- Integration with external applications
+
+!!! Info
+ For more details, examples, and limitations, please refer to the
+ [official PostgreSQL documentation on Logical Replication](https://www.postgresql.org/docs/current/logical-replication.html).
+
+**CloudNativePG** enhances this capability by providing declarative support for
+key PostgreSQL logical replication objects:
+
+- **Publications** via the `Publication` resource
+- **Subscriptions** via the `Subscription` resource
+
+## Publications
+
+In PostgreSQL's publish-and-subscribe replication model, a
+[**publication**](https://www.postgresql.org/docs/current/logical-replication-publication.html)
+is the source of data changes. It acts as a logical container for the change
+sets (also known as *replication sets*) generated from one or more tables within
+a database. Publications can be defined on any PostgreSQL 10+ instance acting
+as the *publisher*, including instances managed by popular DBaaS solutions in the
+public cloud. Each publication is tied to a single database and provides
+fine-grained control over which tables and changes are replicated.
+
+For publishers outside Kubernetes, you can [create publications using SQL](https://www.postgresql.org/docs/current/sql-createpublication.html)
+or leverage the [`cnpg publication create` plugin command](kubectl-plugin.md#logical-replication-publications).
+
+When managing `Cluster` objects with **CloudNativePG**, PostgreSQL publications
+can be defined declaratively through the `Publication` resource.
+
+!!! Info
+ Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Publication)
+ for the full list of attributes you can define for each `Publication` object.
+
+Suppose you have a cluster named `freddie` and want to replicate all tables in
+the `app` database. Here's a `Publication` manifest:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: freddie-publisher
+spec:
+ cluster:
+ name: freddie
+ dbname: app
+ name: publisher
+ target:
+ allTables: true
+```
+
+In the above example:
+
+- The publication object is named `freddie-publisher` (`metadata.name`).
+- The publication is created via the primary of the `freddie` cluster
+ (`spec.cluster.name`) with name `publisher` (`spec.name`).
+- It includes all tables (`spec.target.allTables: true`) from the `app`
+ database (`spec.dbname`).
+
+!!! Important
+ While `allTables` simplifies configuration, PostgreSQL offers fine-grained
+ control for replicating specific tables or targeted data changes. For advanced
+ configurations, consult the [PostgreSQL documentation](https://www.postgresql.org/docs/current/logical-replication.html).
+ Additionally, refer to the [CloudNativePG API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-PublicationTarget)
+ for details on declaratively customizing replication targets.
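+
+As an illustration of such fine-grained targets, the following sketch (the
+schema and table names are made up) publishes every table in a `sales` schema
+plus one specific table:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-selective-publisher
+spec:
+  cluster:
+    name: freddie
+  dbname: app
+  name: selective-publisher
+  target:
+    objects:
+      # FOR TABLES IN SCHEMA sales
+      - tablesInSchema: sales
+      # FOR TABLE public.orders
+      - table:
+          schema: public
+          name: orders
+```
+
+A `table` target can also carry a `columns` list to publish a subset of
+columns, although the CRD validation does not allow combining a column list
+with `tablesInSchema` targets in the same publication.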
+
+### Required Fields in the `Publication` Manifest
+
+The following fields are required for a `Publication` object:
+
+- `metadata.name`: Unique name for the Kubernetes `Publication` object.
+- `spec.cluster.name`: Name of the PostgreSQL cluster.
+- `spec.dbname`: Database name where the publication is created.
+- `spec.name`: Publication name in PostgreSQL.
+- `spec.target`: Specifies the tables or changes to include in the publication.
+
+The `Publication` object must reference a specific `Cluster`, determining where
+the publication will be created. It is managed by the cluster's primary instance,
+ensuring the publication is created or updated as needed.
+
+### Reconciliation and Status
+
+After creating a `Publication`, CloudNativePG manages it on the primary
+instance of the specified cluster. Following a successful reconciliation cycle,
+the `Publication` status will reflect the following:
+
+- `applied: true` indicates that the configuration has been successfully applied.
+- `observedGeneration` matches `metadata.generation`, confirming the applied
+ configuration corresponds to the most recent changes.
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
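+
+For example, after a successful reconciliation, inspecting the object with
+`kubectl get publication freddie-publisher -o yaml` would show a status along
+these lines (output abridged and illustrative):
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-publisher
+  generation: 1
+status:
+  applied: true
+  observedGeneration: 1
+```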
+
+### Removing a publication
+
+The `publicationReclaimPolicy` field controls the behavior when deleting a
+`Publication` object:
+
+- `retain` (default): Leaves the publication in PostgreSQL for manual
+ management.
+- `delete`: Automatically removes the publication from PostgreSQL.
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: freddie-publisher
+spec:
+ cluster:
+ name: freddie
+ dbname: app
+ name: publisher
+ target:
+ allTables: true
+ publicationReclaimPolicy: delete
+```
+
+In this case, deleting the `Publication` object also removes the `publisher`
+publication from the `app` database of the `freddie` cluster.
+
+## Subscriptions
+
+In PostgreSQL's publish-and-subscribe replication model, a
+[**subscription**](https://www.postgresql.org/docs/current/logical-replication-subscription.html)
+represents the downstream component that consumes data changes.
+A subscription establishes the connection to a publisher's database and
+specifies the set of publications (one or more) it subscribes to. Subscriptions
+can be created on any supported PostgreSQL instance acting as the *subscriber*.
+
+!!! Important
+ Since schema definitions are not replicated, the subscriber must have the
+ corresponding tables already defined before data replication begins.
+
+CloudNativePG simplifies subscription management by enabling you to define them
+declaratively using the `Subscription` resource.
+
+!!! Info
+ Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Subscription)
+ for the full list of attributes you can define for each `Subscription` object.
+
+Suppose you want to replicate changes from the `publisher` publication on the
+`app` database of the `freddie` cluster (*publisher*) to the `app` database of
+the `king` cluster (*subscriber*). Here's an example of a `Subscription`
+manifest:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: freddie-to-king-subscription
+spec:
+ cluster:
+ name: king
+ dbname: app
+ name: subscriber
+ externalClusterName: freddie
+ publicationName: publisher
+```
+
+In the above example:
+
+- The subscription object is named `freddie-to-king-subscription` (`metadata.name`).
+- The subscription is created in the `app` database (`spec.dbname`) of the
+ `king` cluster (`spec.cluster.name`), with name `subscriber` (`spec.name`).
+- It connects to the `publisher` publication in the external `freddie` cluster,
+ referenced by `spec.externalClusterName`.
+
+To facilitate this setup, the `freddie` external cluster must be defined in the
+`king` cluster's configuration. Below is an example excerpt showing how to
+define the external cluster in the `king` manifest:
+
+```yaml
+externalClusters:
+ - name: freddie
+ connectionParameters:
+ host: freddie-rw.default.svc
+ user: postgres
+ dbname: app
+```
+
+!!! Info
+ For more details on configuring the `externalClusters` section, see the
+ ["Bootstrap" section](bootstrap.md#the-externalclusters-section) of the
+ documentation.
+
+As you can see, a subscription can connect to any PostgreSQL database
+accessible over the network. This flexibility allows you to seamlessly migrate
+your data into Kubernetes with nearly zero downtime. It’s an excellent option
+for transitioning from various environments, including popular cloud-based
+Database-as-a-Service (DBaaS) platforms.
+
+### Required Fields in the `Subscription` Manifest
+
+The following fields are mandatory for defining a `Subscription` object:
+
+- `metadata.name`: A unique name for the Kubernetes `Subscription` object
+ within its namespace.
+- `spec.cluster.name`: The name of the PostgreSQL cluster where the
+ subscription will be created.
+- `spec.dbname`: The name of the database in which the subscription will be
+ created.
+- `spec.name`: The name of the subscription as it will appear in PostgreSQL.
+- `spec.externalClusterName`: The name of the external cluster, as defined in
+ the `spec.cluster.name` cluster's configuration. This references the
+ publisher database.
+- `spec.publicationName`: The name of the publication in the publisher database
+ to which the subscription will connect.
+
+The `Subscription` object must reference a specific `Cluster`, determining
+where the subscription will be managed. CloudNativePG ensures that the
+subscription is created or updated on the primary instance of the specified
+cluster.
+
+### Reconciliation and Status
+
+After creating a `Subscription`, CloudNativePG manages it on the primary
+instance of the specified cluster. Following a successful reconciliation cycle,
+the `Subscription` status will reflect the following:
+
+- `applied: true` indicates that the configuration has been successfully applied.
+- `observedGeneration` matches `metadata.generation`, confirming the applied
+ configuration corresponds to the most recent changes.
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
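+
+For instance, if the referenced publication does not exist on the publisher,
+the status would look roughly like the following sketch, where the message is
+a placeholder for the actual error string produced by the reconciliation:
+
+```yaml
+status:
+  applied: false
+  message: "<error returned while creating or altering the subscription>"
+```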
+
+### Removing a subscription
+
+The `subscriptionReclaimPolicy` field controls the behavior when deleting a
+`Subscription` object:
+
+- `retain` (default): Leaves the subscription in PostgreSQL for manual
+ management.
+- `delete`: Automatically removes the subscription from PostgreSQL.
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: freddie-to-king-subscription
+spec:
+ cluster:
+ name: king
+ dbname: app
+ name: subscriber
+ externalClusterName: freddie
+ publicationName: publisher
+ subscriptionReclaimPolicy: delete
+```
+
+In this case, deleting the `Subscription` object also removes the `subscriber`
+subscription from the `app` database of the `king` cluster.
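+
+You can verify the removal on the PostgreSQL side by querying the
+`pg_subscription` catalog, for example with the `psql` command provided by
+the `cnpg` plugin:
+
+```console
+kubectl cnpg psql king -- app -qAt -c 'SELECT subname FROM pg_subscription'
+```
+
+An empty result confirms that the subscription has been dropped.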
+
+## Limitations
+
+Logical replication in PostgreSQL has some inherent limitations, as outlined in
+the [official documentation](https://www.postgresql.org/docs/current/logical-replication-restrictions.html).
+Notably, the following objects are not replicated:
+
+- **Database schema and DDL commands**
+- **Sequence data**
+- **Large objects**
+
+### Addressing Schema Replication
+
+The first limitation, related to schema replication, can be easily addressed
+using CloudNativePG's capabilities. For instance, you can leverage the `import`
+bootstrap feature to copy the schema of the tables you need to replicate.
+Alternatively, you can manually create the schema as you would for any
+PostgreSQL database.
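+
+For reference, the following excerpt sketches a schema-only `import`
+bootstrap; a complete manifest using it is shown in the example later on
+this page:
+
+```yaml
+bootstrap:
+  initdb:
+    import:
+      type: microservice
+      schemaOnly: true
+      databases:
+        - app
+      source:
+        externalCluster: freddie
+```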
+
+### Handling Sequences
+
+While sequences are not automatically kept in sync through logical replication,
+CloudNativePG provides a way to handle them during live migrations.
+You can use the [`cnpg` plugin](kubectl-plugin.md#synchronizing-sequences)
+to synchronize sequence values, ensuring consistency between the publisher and
+subscriber databases.
+
+## Example of live migration and major Postgres upgrade with logical replication
+
+To highlight the powerful capabilities of logical replication, this example
+demonstrates how to replicate data from a publisher database (`freddie`)
+running PostgreSQL 16 to a subscriber database (`king`) running the latest
+PostgreSQL version. This setup can be deployed in your Kubernetes cluster for
+evaluation and hands-on learning.
+
+This example illustrates how logical replication facilitates live migrations
+and upgrades between PostgreSQL versions while ensuring data consistency. By
+combining logical replication with CloudNativePG, you can easily set up,
+manage, and evaluate such scenarios in a Kubernetes environment.
+
+### Step 1: Setting Up the Publisher (`freddie`)
+
+The first step involves creating a `freddie` PostgreSQL cluster with version 16.
+The cluster contains a single instance and includes an `app` database
+initialized with a table, `n`, storing 10,000 numbers. A logical replication
+publication named `publisher` is also configured to include all tables in the
+database.
+
+Here’s the manifest for setting up the `freddie` cluster and its publication
+resource:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: freddie
+spec:
+ instances: 1
+
+ imageName: ghcr.io/cloudnative-pg/postgresql:16
+
+ storage:
+ size: 1Gi
+
+ bootstrap:
+ initdb:
+ postInitApplicationSQL:
+ - CREATE TABLE n (i SERIAL PRIMARY KEY, m INTEGER)
+ - INSERT INTO n (m) (SELECT generate_series(1, 10000))
+ - ALTER TABLE n OWNER TO app
+
+ managed:
+ roles:
+ - name: app
+ login: true
+ replication: true
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: freddie-publisher
+spec:
+ cluster:
+ name: freddie
+ dbname: app
+ name: publisher
+ target:
+ allTables: true
+```
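+
+Once the `freddie` cluster is up, you can optionally confirm that the
+publication exists by querying the `pg_publication` catalog:
+
+```console
+kubectl cnpg psql freddie -- app -qAt -c 'SELECT pubname FROM pg_publication'
+publisher
+```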
+
+### Step 2: Setting Up the Subscriber (`king`)
+
+Next, create the `king` PostgreSQL cluster, running the latest version of
+PostgreSQL. This cluster initializes by importing the schema from the `app`
+database on the `freddie` cluster using the external cluster configuration. A
+`Subscription` resource, `freddie-to-king-subscription`, is then configured to
+consume changes published by the `publisher` on `freddie`.
+
+Below is the manifest for setting up the `king` cluster and its subscription:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: king
+spec:
+ instances: 1
+
+ storage:
+ size: 1Gi
+
+ bootstrap:
+ initdb:
+ import:
+ type: microservice
+ schemaOnly: true
+ databases:
+ - app
+ source:
+ externalCluster: freddie
+
+ externalClusters:
+ - name: freddie
+ connectionParameters:
+ host: freddie-rw.default.svc
+ user: app
+ dbname: app
+ password:
+ name: freddie-app
+ key: password
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: freddie-to-king-subscription
+spec:
+ cluster:
+ name: king
+ dbname: app
+ name: subscriber
+ externalClusterName: freddie
+ publicationName: publisher
+```
+
+Once the `king` cluster is running, you can verify that the replication is
+working by connecting to the `app` database and counting the records in the `n`
+table. The following example uses the `psql` command provided by the `cnpg`
+plugin for simplicity:
+
+```console
+kubectl cnpg psql king -- app -qAt -c 'SELECT count(*) FROM n'
+10000
+```
+
+This command should return `10000`, confirming that the data from the `freddie`
+cluster has been successfully replicated to the `king` cluster.
+
+Using the `cnpg` plugin, you can also synchronize existing sequences to ensure
+consistency between the publisher and subscriber. The example below
+demonstrates how to synchronize a sequence for the `king` cluster:
+
+```console
+kubectl cnpg subscription sync-sequences king --subscription=subscriber
+SELECT setval('"public"."n_i_seq"', 10000);
+
+10000
+```
+
+This command updates the `n_i_seq` sequence in the `king` cluster to match
+the current value of its counterpart in the source database, keeping the two
+in sync.
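+
+To double-check the result, you can read the sequence value back from the
+subscriber (`n_i_seq` is the sequence generated for the `SERIAL` column of
+the `n` table):
+
+```console
+kubectl cnpg psql king -- app -qAt -c 'SELECT last_value FROM n_i_seq'
+10000
+```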
diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md
index 3ef8b0f5d8..5008d33b8c 100644
--- a/docs/src/operator_capability_levels.md
+++ b/docs/src/operator_capability_levels.md
@@ -590,6 +590,15 @@ and makes the use of the underlying PostgreSQL resources more efficient.
Instead of connecting directly to a PostgreSQL service, applications can now
connect to the PgBouncer service and start reusing any existing connection.
+### Logical Replication
+
+CloudNativePG supports PostgreSQL's logical replication in a declarative manner
+using `Publication` and `Subscription` custom resource definitions.
+
+Logical replication is particularly useful together with the import facility
+for online data migrations (even from public DBaaS solutions) and major
+PostgreSQL upgrades.
+
## Level 4: Deep insights
Capability level 4 is about *observability*: monitoring,
diff --git a/docs/src/replication.md b/docs/src/replication.md
index fac1db21c6..ff9f0f1f41 100644
--- a/docs/src/replication.md
+++ b/docs/src/replication.md
@@ -37,10 +37,10 @@ recovery.
PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through
*hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the
transaction level, supporting RPO=0 clusters. Cascading replication was added in
-PostgreSQL 9.2 (2012). The foundations for logical replication were established
-in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native support for
-the publisher/subscriber pattern to replicate data from an origin to a
-destination. The table below summarizes these milestones.
+PostgreSQL 9.2 (2012). The foundations for [logical replication](logical_replication.md)
+were established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced
+native support for the publisher/subscriber pattern to replicate data from an
+origin to a destination. The table below summarizes these milestones.
| Version | Year | Feature |
|:-------:|:----:|-----------------------------------------------------------------------|
diff --git a/docs/src/samples/cluster-example-logical-destination.yaml b/docs/src/samples/cluster-example-logical-destination.yaml
index 75cb3f2af2..e8a2f574f9 100644
--- a/docs/src/samples/cluster-example-logical-destination.yaml
+++ b/docs/src/samples/cluster-example-logical-destination.yaml
@@ -22,12 +22,20 @@ spec:
- name: cluster-example
connectionParameters:
host: cluster-example-rw.default.svc
- # We're using the superuser to allow the publication to be
- # created directly when connected to the target server.
- # See cluster-example-logical-source.yaml for more information
- # about this.
- user: postgres
+ user: app
dbname: app
password:
- name: cluster-example-superuser
+ name: cluster-example-app
key: password
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: cluster-example-dest-sub
+spec:
+ cluster:
+ name: cluster-example-dest
+ name: sub
+ dbname: app
+ publicationName: pub
+ externalClusterName: cluster-example
diff --git a/docs/src/samples/cluster-example-logical-source.yaml b/docs/src/samples/cluster-example-logical-source.yaml
index ad9f888353..95bac8cd82 100644
--- a/docs/src/samples/cluster-example-logical-source.yaml
+++ b/docs/src/samples/cluster-example-logical-source.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
instances: 1
- imageName: ghcr.io/cloudnative-pg/postgresql:13
+ imageName: ghcr.io/cloudnative-pg/postgresql:16
storage:
size: 1Gi
@@ -15,18 +15,30 @@ spec:
postInitApplicationSQL:
- CREATE TABLE numbers (i SERIAL PRIMARY KEY, m INTEGER)
- INSERT INTO numbers (m) (SELECT generate_series(1,10000))
- - ALTER TABLE numbers OWNER TO app;
+ - ALTER TABLE numbers OWNER TO app
- CREATE TABLE numbers_two (i SERIAL PRIMARY KEY, m INTEGER)
- INSERT INTO numbers_two (m) (SELECT generate_series(1,10000))
- - ALTER TABLE numbers_two OWNER TO app;
- - CREATE TABLE numbers_three (i SERIAL PRIMARY KEY, m INTEGER)
- - INSERT INTO numbers_three (m) (SELECT generate_series(1,10000))
- - ALTER TABLE numbers_three OWNER TO app;
-
- enableSuperuserAccess: true
+ - ALTER TABLE numbers_two OWNER TO app
+ - CREATE SCHEMA another_schema
+ - ALTER SCHEMA another_schema OWNER TO app
+ - CREATE TABLE another_schema.numbers_three (i SERIAL PRIMARY KEY, m INTEGER)
+ - INSERT INTO another_schema.numbers_three (m) (SELECT generate_series(1,10000))
+ - ALTER TABLE another_schema.numbers_three OWNER TO app
managed:
roles:
- name: app
login: true
replication: true
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: cluster-example-pub
+spec:
+ name: pub
+ dbname: app
+ cluster:
+ name: cluster-example
+ target:
+ allTables: true
diff --git a/docs/src/samples/publication-example-objects.yaml b/docs/src/samples/publication-example-objects.yaml
new file mode 100644
index 0000000000..2cc68a5296
--- /dev/null
+++ b/docs/src/samples/publication-example-objects.yaml
@@ -0,0 +1,16 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: publication-example-objects
+spec:
+ cluster:
+ name: cluster-example
+ name: pub-objects
+ dbname: app
+ target:
+ objects:
+ - tablesInSchema: public
+ - table:
+ schema: another_schema
+ name: numbers_three
+ only: true
diff --git a/docs/src/samples/publication-example.yaml b/docs/src/samples/publication-example.yaml
new file mode 100644
index 0000000000..d2df4bc3f2
--- /dev/null
+++ b/docs/src/samples/publication-example.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: publication-example
+spec:
+ cluster:
+ name: cluster-example
+ name: pub-all
+ dbname: app
+ target:
+ allTables: true
diff --git a/docs/src/samples/subscription-example.yaml b/docs/src/samples/subscription-example.yaml
new file mode 100644
index 0000000000..6392d71830
--- /dev/null
+++ b/docs/src/samples/subscription-example.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: subscription-sample
+spec:
+ name: sub
+ dbname: app
+ publicationName: pub-all
+ cluster:
+ name: cluster-example-dest
+ externalClusterName: cluster-example
diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go
index 066360ddaa..e02d06d55c 100644
--- a/internal/cmd/manager/instance/run/cmd.go
+++ b/internal/cmd/manager/instance/run/cmd.go
@@ -165,6 +165,16 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
instance.GetNamespaceName(): {},
},
},
+ &apiv1.Publication{}: {
+ Namespaces: map[string]cache.Config{
+ instance.GetNamespaceName(): {},
+ },
+ },
+ &apiv1.Subscription{}: {
+ Namespaces: map[string]cache.Config{
+ instance.GetNamespaceName(): {},
+ },
+ },
},
},
// We don't need a cache for secrets and configmap, as all reloads
@@ -215,6 +225,20 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
return err
}
+ // database publication reconciler
+ publicationReconciler := controller.NewPublicationReconciler(mgr, instance)
+ if err := publicationReconciler.SetupWithManager(mgr); err != nil {
+ contextLogger.Error(err, "unable to create publication controller")
+ return err
+ }
+
+ // database subscription reconciler
+ subscriptionReconciler := controller.NewSubscriptionReconciler(mgr, instance)
+ if err := subscriptionReconciler.SetupWithManager(mgr); err != nil {
+ contextLogger.Error(err, "unable to create subscription controller")
+ return err
+ }
+
// postgres CSV logs handler (PGAudit too)
postgresLogPipe := logpipe.NewLogPipe()
if err := mgr.Add(postgresLogPipe); err != nil {
diff --git a/internal/cmd/plugin/logical/publication/create/publication.go b/internal/cmd/plugin/logical/publication/create/publication.go
index 9a6be06362..964f35c578 100644
--- a/internal/cmd/plugin/logical/publication/create/publication.go
+++ b/internal/cmd/plugin/logical/publication/create/publication.go
@@ -56,7 +56,7 @@ type PublicationTarget interface {
ToPublicationTargetSQL() string
}
-// PublicationTargetALLTables will publicate all tables
+// PublicationTargetALLTables will publish all tables
type PublicationTargetALLTables struct{}
// ToPublicationTargetSQL implements the PublicationTarget interface
@@ -64,7 +64,7 @@ func (PublicationTargetALLTables) ToPublicationTargetSQL() string {
return "FOR ALL TABLES"
}
-// PublicationTargetPublicationObjects publicates multiple publication objects
+// PublicationTargetPublicationObjects publishes multiple publication objects
type PublicationTargetPublicationObjects struct {
PublicationObjects []PublicationObject
}
@@ -85,15 +85,15 @@ func (objs *PublicationTargetPublicationObjects) ToPublicationTargetSQL() string
return result
}
-// PublicationObject represent an object to publicate
+// PublicationObject represents an object to publish
type PublicationObject interface {
- // Create the SQL statement to publicate this object
+ // ToPublicationObjectSQL creates the SQL statement to publish this object
ToPublicationObjectSQL() string
}
-// PublicationObjectSchema will publicate all the tables in a certain schema
+// PublicationObjectSchema will publish all the tables in a certain schema
type PublicationObjectSchema struct {
- // The schema to publicate
+ // The schema to publish
SchemaName string
}
@@ -102,9 +102,9 @@ func (obj PublicationObjectSchema) ToPublicationObjectSQL() string {
return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.SchemaName}.Sanitize())
}
-// PublicationObjectTableExpression will publicate the passed table expression
+// PublicationObjectTableExpression will publish the passed table expression
type PublicationObjectTableExpression struct {
- // The table expression to publicate
+ // The table expression to publish
TableExpressions []string
}
diff --git a/internal/cmd/plugin/logical/publication/create/publication_test.go b/internal/cmd/plugin/logical/publication/create/publication_test.go
index 60081a0b0e..1aafaa1772 100644
--- a/internal/cmd/plugin/logical/publication/create/publication_test.go
+++ b/internal/cmd/plugin/logical/publication/create/publication_test.go
@@ -22,14 +22,14 @@ import (
)
var _ = Describe("create publication SQL generator", func() {
- It("can publicate all tables", func() {
+ It("can publish all tables", func() {
Expect(PublicationCmdBuilder{
PublicationName: "app",
PublicationTarget: PublicationTargetALLTables{},
}.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES`))
})
- It("can publicate all tables with custom parameters", func() {
+ It("can publish all tables with custom parameters", func() {
Expect(PublicationCmdBuilder{
PublicationName: "app",
PublicationTarget: PublicationTargetALLTables{},
@@ -37,7 +37,7 @@ var _ = Describe("create publication SQL generator", func() {
}.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES WITH (publish='insert')`))
})
- It("can publicate a list of tables via multiple publication objects", func() {
+ It("can publish a list of tables via multiple publication objects", func() {
// This is supported from PG 15
Expect(PublicationCmdBuilder{
PublicationName: "app",
@@ -54,7 +54,7 @@ var _ = Describe("create publication SQL generator", func() {
}.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLE a, TABLE b`))
})
- It("can publicate a list of tables via multiple table expressions", func() {
+ It("can publish a list of tables via multiple table expressions", func() {
// This is supported in PG < 15
Expect(PublicationCmdBuilder{
PublicationName: "app",
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index f433ab5d69..2cdd503e82 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -160,10 +160,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
"namespace", req.Namespace,
)
}
- if err := r.deleteDatabaseFinalizers(ctx, req.NamespacedName); err != nil {
+ if err := r.deleteFinalizers(ctx, req.NamespacedName); err != nil {
contextLogger.Error(
err,
- "error while deleting finalizers of Databases on the cluster",
+ "error while deleting finalizers of objects on the cluster",
"clusterName", req.Name,
"namespace", req.Namespace,
)
diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go
index 6e88bf429a..0bee4928df 100644
--- a/internal/controller/finalizers_delete.go
+++ b/internal/controller/finalizers_delete.go
@@ -20,6 +20,8 @@ import (
"context"
"github.com/cloudnative-pg/machinery/pkg/log"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -28,36 +30,80 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
-// deleteDatabaseFinalizers deletes Database object finalizers when the cluster they were in has been deleted
-func (r *ClusterReconciler) deleteDatabaseFinalizers(ctx context.Context, namespacedName types.NamespacedName) error {
- contextLogger := log.FromContext(ctx)
+// ClusterReferrer is an object containing a cluster reference
+type ClusterReferrer interface {
+ GetClusterRef() corev1.LocalObjectReference
+ client.Object
+}
- databases := apiv1.DatabaseList{}
- if err := r.List(ctx,
- &databases,
- client.InNamespace(namespacedName.Namespace),
+// deleteFinalizers deletes object finalizers when the cluster they were in has been deleted
+func (r *ClusterReconciler) deleteFinalizers(ctx context.Context, namespacedName types.NamespacedName) error {
+ if err := r.deleteFinalizersForResource(
+ ctx,
+ namespacedName,
+ &apiv1.DatabaseList{},
+ utils.DatabaseFinalizerName,
); err != nil {
return err
}
- for idx := range databases.Items {
- database := &databases.Items[idx]
+ if err := r.deleteFinalizersForResource(
+ ctx,
+ namespacedName,
+ &apiv1.PublicationList{},
+ utils.PublicationFinalizerName,
+ ); err != nil {
+ return err
+ }
+
+ return r.deleteFinalizersForResource(
+ ctx,
+ namespacedName,
+ &apiv1.SubscriptionList{},
+ utils.SubscriptionFinalizerName,
+ )
+}
+
+// deleteFinalizersForResource deletes finalizers for a given resource type
+func (r *ClusterReconciler) deleteFinalizersForResource(
+ ctx context.Context,
+ namespacedName types.NamespacedName,
+ list client.ObjectList,
+ finalizerName string,
+) error {
+ contextLogger := log.FromContext(ctx)
+
+ if err := r.List(ctx, list, client.InNamespace(namespacedName.Namespace)); err != nil {
+ return err
+ }
+
+ items, err := meta.ExtractList(list)
+ if err != nil {
+ return err
+ }
+
+ for _, item := range items {
+ obj, ok := item.(ClusterReferrer)
+ if !ok {
+ continue
+ }
- if database.Spec.ClusterRef.Name != namespacedName.Name {
+ if obj.GetClusterRef().Name != namespacedName.Name {
continue
}
- origDatabase := database.DeepCopy()
- if controllerutil.RemoveFinalizer(database, utils.DatabaseFinalizerName) {
- contextLogger.Debug("Removing finalizer from database",
- "finalizer", utils.DatabaseFinalizerName, "database", database.Name)
- if err := r.Patch(ctx, database, client.MergeFrom(origDatabase)); err != nil {
+ origObj := obj.DeepCopyObject().(ClusterReferrer)
+ if controllerutil.RemoveFinalizer(obj, finalizerName) {
+ contextLogger.Debug("Removing finalizer from resource",
+ "finalizer", finalizerName, "resource", obj.GetName())
+ if err := r.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil {
contextLogger.Error(
err,
- "error while removing finalizer from database",
- "database", database.Name,
- "oldFinalizerList", origDatabase.ObjectMeta.Finalizers,
- "newFinalizerList", database.ObjectMeta.Finalizers,
+ "error while removing finalizer from resource",
+ "resource", obj.GetName(),
+ "kind", obj.GetObjectKind().GroupVersionKind().Kind,
+ "oldFinalizerList", origObj.GetFinalizers(),
+ "newFinalizerList", obj.GetFinalizers(),
)
return err
}
diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go
index 7354f68d83..cc6c0d5651 100644
--- a/internal/controller/finalizers_delete_test.go
+++ b/internal/controller/finalizers_delete_test.go
@@ -32,7 +32,8 @@ import (
. "github.com/onsi/gomega"
)
-var _ = Describe("Database CRD finalizers", func() {
+// nolint: dupl
+var _ = Describe("CRD finalizers", func() {
var (
r ClusterReconciler
scheme *runtime.Scheme
@@ -88,7 +89,7 @@ var _ = Describe("Database CRD finalizers", func() {
cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build()
r.Client = cli
- err := r.deleteDatabaseFinalizers(ctx, namespacedName)
+ err := r.deleteFinalizers(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
for _, db := range databaseList.Items {
@@ -123,7 +124,7 @@ var _ = Describe("Database CRD finalizers", func() {
cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build()
r.Client = cli
- err := r.deleteDatabaseFinalizers(ctx, namespacedName)
+ err := r.deleteFinalizers(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
database := &apiv1.Database{}
@@ -131,4 +132,166 @@ var _ = Describe("Database CRD finalizers", func() {
Expect(err).ToNot(HaveOccurred())
Expect(database.Finalizers).To(BeEquivalentTo([]string{utils.DatabaseFinalizerName}))
})
+
+ It("should delete publication finalizers for publications on the cluster", func(ctx SpecContext) {
+ publicationList := &apiv1.PublicationList{
+ Items: []apiv1.Publication{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Finalizers: []string{
+ utils.PublicationFinalizerName,
+ },
+ Name: "pub-1",
+ Namespace: "test",
+ },
+ Spec: apiv1.PublicationSpec{
+ Name: "pub-test",
+ ClusterRef: corev1.LocalObjectReference{
+ Name: "cluster",
+ },
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Finalizers: []string{
+ utils.PublicationFinalizerName,
+ },
+ Name: "pub-2",
+ Namespace: "test",
+ },
+ Spec: apiv1.PublicationSpec{
+ Name: "pub-test-2",
+ ClusterRef: corev1.LocalObjectReference{
+ Name: "cluster",
+ },
+ },
+ },
+ },
+ }
+
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build()
+ r.Client = cli
+ err := r.deleteFinalizers(ctx, namespacedName)
+ Expect(err).ToNot(HaveOccurred())
+
+ for _, pub := range publicationList.Items {
+ publication := &apiv1.Publication{}
+ err = cli.Get(ctx, client.ObjectKeyFromObject(&pub), publication)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(publication.Finalizers).To(BeZero())
+ }
+ })
+
+ It("should not delete publication finalizers for publications in another cluster", func(ctx SpecContext) {
+ publicationList := &apiv1.PublicationList{
+ Items: []apiv1.Publication{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Finalizers: []string{
+ utils.PublicationFinalizerName,
+ },
+ Name: "pub-1",
+ Namespace: "test",
+ },
+ Spec: apiv1.PublicationSpec{
+ Name: "pub-test",
+ ClusterRef: corev1.LocalObjectReference{
+ Name: "another-cluster",
+ },
+ },
+ },
+ },
+ }
+
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build()
+ r.Client = cli
+ err := r.deleteFinalizers(ctx, namespacedName)
+ Expect(err).ToNot(HaveOccurred())
+
+ publication := &apiv1.Publication{}
+ err = cli.Get(ctx, client.ObjectKeyFromObject(&publicationList.Items[0]), publication)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(publication.Finalizers).To(BeEquivalentTo([]string{utils.PublicationFinalizerName}))
+ })
+
+ It("should delete subscription finalizers for subscriptions on the cluster", func(ctx SpecContext) {
+ subscriptionList := &apiv1.SubscriptionList{
+ Items: []apiv1.Subscription{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Finalizers: []string{
+ utils.SubscriptionFinalizerName,
+ },
+ Name: "sub-1",
+ Namespace: "test",
+ },
+ Spec: apiv1.SubscriptionSpec{
+ Name: "sub-test",
+ ClusterRef: corev1.LocalObjectReference{
+ Name: "cluster",
+ },
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Finalizers: []string{
+ utils.SubscriptionFinalizerName,
+ },
+ Name: "sub-2",
+ Namespace: "test",
+ },
+ Spec: apiv1.SubscriptionSpec{
+ Name: "sub-test-2",
+ ClusterRef: corev1.LocalObjectReference{
+ Name: "cluster",
+ },
+ },
+ },
+ },
+ }
+
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build()
+ r.Client = cli
+ err := r.deleteFinalizers(ctx, namespacedName)
+ Expect(err).ToNot(HaveOccurred())
+
+ for _, sub := range subscriptionList.Items {
+ subscription := &apiv1.Subscription{}
+ err = cli.Get(ctx, client.ObjectKeyFromObject(&sub), subscription)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(subscription.Finalizers).To(BeZero())
+ }
+ })
+
+ It("should not delete subscription finalizers for subscriptions in another cluster", func(ctx SpecContext) {
+ subscriptionList := &apiv1.SubscriptionList{
+ Items: []apiv1.Subscription{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Finalizers: []string{
+ utils.SubscriptionFinalizerName,
+ },
+ Name: "sub-1",
+ Namespace: "test",
+ },
+ Spec: apiv1.SubscriptionSpec{
+ Name: "sub-test",
+ ClusterRef: corev1.LocalObjectReference{
+ Name: "another-cluster",
+ },
+ },
+ },
+ },
+ }
+
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build()
+ r.Client = cli
+ err := r.deleteFinalizers(ctx, namespacedName)
+ Expect(err).ToNot(HaveOccurred())
+
+ subscription := &apiv1.Subscription{}
+ err = cli.Get(ctx, client.ObjectKeyFromObject(&subscriptionList.Items[0]), subscription)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(subscription.Finalizers).To(BeEquivalentTo([]string{utils.SubscriptionFinalizerName}))
+ })
})
diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go
new file mode 100644
index 0000000000..c0d87aeb97
--- /dev/null
+++ b/internal/management/controller/common.go
@@ -0,0 +1,111 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "maps"
+ "slices"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/lib/pq"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+type markableAsFailed interface {
+ client.Object
+ SetAsFailed(err error)
+}
+
+// markAsFailed marks the reconciliation as failed in the resource status
+func markAsFailed(
+ ctx context.Context,
+ cli client.Client,
+ resource markableAsFailed,
+ err error,
+) error {
+ oldResource := resource.DeepCopyObject().(markableAsFailed)
+ resource.SetAsFailed(err)
+ return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource))
+}
+
+type markableAsUnknown interface {
+ client.Object
+ SetAsUnknown(err error)
+}
+
+// markAsUnknown marks the reconciliation state as unknown in the resource status
+func markAsUnknown(
+ ctx context.Context,
+ cli client.Client,
+ resource markableAsUnknown,
+ err error,
+) error {
+ oldResource := resource.DeepCopyObject().(markableAsUnknown)
+ resource.SetAsUnknown(err)
+ return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource))
+}
+
+type markableAsReady interface {
+ client.Object
+ SetAsReady()
+}
+
+// markAsReady marks the reconciliation as succeeded inside the resource
+func markAsReady(
+ ctx context.Context,
+ cli client.Client,
+ resource markableAsReady,
+) error {
+ oldResource := resource.DeepCopyObject().(markableAsReady)
+ resource.SetAsReady()
+
+ return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource))
+}
+
+func getClusterFromInstance(
+ ctx context.Context,
+ cli client.Client,
+ instance instanceInterface,
+) (*apiv1.Cluster, error) {
+ var cluster apiv1.Cluster
+ err := cli.Get(ctx, types.NamespacedName{
+ Name: instance.GetClusterName(),
+ Namespace: instance.GetNamespaceName(),
+ }, &cluster)
+ return &cluster, err
+}
+
+func toPostgresParameters(parameters map[string]string) string {
+ if len(parameters) == 0 {
+ return ""
+ }
+
+ b := new(bytes.Buffer)
+ for _, key := range slices.Sorted(maps.Keys(parameters)) {
+ // TODO(armru): any alternative to pg.QuoteLiteral?
+ _, _ = fmt.Fprintf(b, "%s = %s, ", pgx.Identifier{key}.Sanitize(), pq.QuoteLiteral(parameters[key]))
+ }
+
+ // trim the trailing ", " separator
+ return b.String()[:len(b.String())-2]
+}
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index 7fb0b2a6f4..a82f8ce5a1 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -26,7 +26,6 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -308,18 +307,7 @@ func (r *DatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error {
// GetCluster gets the managed cluster through the client
func (r *DatabaseReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
- var cluster apiv1.Cluster
- err := r.Client.Get(ctx,
- types.NamespacedName{
- Namespace: r.instance.GetNamespaceName(),
- Name: r.instance.GetClusterName(),
- },
- &cluster)
- if err != nil {
- return nil, err
- }
-
- return &cluster, nil
+ return getClusterFromInstance(ctx, r.Client, r.instance)
}
func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.Database) error {
diff --git a/internal/management/controller/finalizers.go b/internal/management/controller/finalizers.go
new file mode 100644
index 0000000000..ed334d16fb
--- /dev/null
+++ b/internal/management/controller/finalizers.go
@@ -0,0 +1,49 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+type finalizerReconciler[T client.Object] struct {
+ cli client.Client
+ finalizerName string
+ onRemoveFunc func(ctx context.Context, resource T) error
+}
+
+func newFinalizerReconciler[T client.Object](
+ cli client.Client,
+ finalizerName string,
+ onRemoveFunc func(ctx context.Context, resource T) error,
+) *finalizerReconciler[T] {
+ return &finalizerReconciler[T]{
+ cli: cli,
+ finalizerName: finalizerName,
+ onRemoveFunc: onRemoveFunc,
+ }
+}
+
+func (f finalizerReconciler[T]) reconcile(ctx context.Context, resource T) error {
+ // add the finalizer to non-deleted resources, if not already present
+ if resource.GetDeletionTimestamp().IsZero() {
+ if !controllerutil.AddFinalizer(resource, f.finalizerName) {
+ return nil
+ }
+ return f.cli.Update(ctx, resource)
+ }
+
+ // the resource is being deleted but our finalizer is not present, so we can quit
+ if !controllerutil.ContainsFinalizer(resource, f.finalizerName) {
+ return nil
+ }
+
+ if err := f.onRemoveFunc(ctx, resource); err != nil {
+ return err
+ }
+
+ // remove our finalizer from the list and update it.
+ controllerutil.RemoveFinalizer(resource, f.finalizerName)
+ return f.cli.Update(ctx, resource)
+}
diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go
index b1c01130d7..426f85fd14 100644
--- a/internal/management/controller/manager.go
+++ b/internal/management/controller/manager.go
@@ -82,18 +82,7 @@ func (r *InstanceReconciler) Instance() *postgres.Instance {
// GetCluster gets the managed cluster through the client
func (r *InstanceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
- var cluster apiv1.Cluster
- err := r.GetClient().Get(ctx,
- types.NamespacedName{
- Namespace: r.instance.GetNamespaceName(),
- Name: r.instance.GetClusterName(),
- },
- &cluster)
- if err != nil {
- return nil, err
- }
-
- return &cluster, nil
+ return getClusterFromInstance(ctx, r.client, r.instance)
}
// GetSecret will get a named secret in the instance namespace
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
new file mode 100644
index 0000000000..f9d1bc8bd9
--- /dev/null
+++ b/internal/management/controller/publication_controller.go
@@ -0,0 +1,178 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// PublicationReconciler reconciles a Publication object
+type PublicationReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+
+ instance *postgres.Instance
+ finalizerReconciler *finalizerReconciler[*apiv1.Publication]
+}
+
+// publicationReconciliationInterval is the interval between two
+// consecutive reconciliation attempts for a publication
+const publicationReconciliationInterval = 30 * time.Second
+
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=publications,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=publications/status,verbs=get;update;patch
+
+// Reconcile is the publication reconciliation loop
+func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ contextLogger := log.FromContext(ctx)
+
+ contextLogger.Debug("Reconciliation loop start")
+ defer func() {
+ contextLogger.Debug("Reconciliation loop end")
+ }()
+
+ // Get the publication object
+ var publication apiv1.Publication
+ if err := r.Client.Get(ctx, client.ObjectKey{
+ Namespace: req.Namespace,
+ Name: req.Name,
+ }, &publication); err != nil {
+ contextLogger.Trace("Could not fetch Publication", "error", err)
+ return ctrl.Result{}, client.IgnoreNotFound(err)
+ }
+
+ // This is not for me!
+ if publication.Spec.ClusterRef.Name != r.instance.GetClusterName() {
+ contextLogger.Trace("Publication is not for this cluster",
+ "cluster", publication.Spec.ClusterRef.Name,
+ "expected", r.instance.GetClusterName(),
+ )
+ return ctrl.Result{}, nil
+ }
+
+ // If everything is reconciled, we're done here
+ if publication.Generation == publication.Status.ObservedGeneration {
+ return ctrl.Result{}, nil
+ }
+
+ // Fetch the Cluster from the cache
+ cluster, err := r.GetCluster(ctx)
+ if err != nil {
+ return ctrl.Result{}, markAsFailed(ctx, r.Client, &publication, fmt.Errorf("while fetching the cluster: %w", err))
+ }
+
+ // Still not for me, we're waiting for a switchover
+ if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary {
+ return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
+ }
+
+ // This is not for me, at least now
+ if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
+ return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
+ }
+
+ // Cannot do anything on a replica cluster
+ if cluster.IsReplica() {
+ if err := markAsUnknown(ctx, r.Client, &publication, errClusterIsReplica); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
+ }
+
+ if err := r.finalizerReconciler.reconcile(ctx, &publication); err != nil {
+ return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err)
+ }
+ if !publication.GetDeletionTimestamp().IsZero() {
+ return ctrl.Result{}, nil
+ }
+
+ if err := r.alignPublication(ctx, &publication); err != nil {
+ if err := markAsFailed(ctx, r.Client, &publication, err); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
+ }
+
+ if err := markAsReady(ctx, r.Client, &publication); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
+}
+
+func (r *PublicationReconciler) evaluateDropPublication(ctx context.Context, pub *apiv1.Publication) error {
+ if pub.Spec.ReclaimPolicy != apiv1.PublicationReclaimDelete {
+ return nil
+ }
+ db, err := r.instance.ConnectionPool().Connection(pub.Spec.DBName)
+ if err != nil {
+ return fmt.Errorf("while getting DB connection: %w", err)
+ }
+
+ return executeDropPublication(ctx, db, pub.Spec.Name)
+}
+
+// NewPublicationReconciler creates a new publication reconciler
+func NewPublicationReconciler(
+ mgr manager.Manager,
+ instance *postgres.Instance,
+) *PublicationReconciler {
+ pr := &PublicationReconciler{
+ Client: mgr.GetClient(),
+ instance: instance,
+ }
+
+ pr.finalizerReconciler = newFinalizerReconciler(
+ mgr.GetClient(),
+ utils.PublicationFinalizerName,
+ pr.evaluateDropPublication,
+ )
+
+ return pr
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *PublicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&apiv1.Publication{}).
+ Named("instance-publication").
+ Complete(r)
+}
+
+// GetCluster gets the managed cluster through the client
+func (r *PublicationReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
+ return getClusterFromInstance(ctx, r.Client, r.instance)
+}
diff --git a/internal/management/controller/publication_controller_sql.go b/internal/management/controller/publication_controller_sql.go
new file mode 100644
index 0000000000..e179e71bf2
--- /dev/null
+++ b/internal/management/controller/publication_controller_sql.go
@@ -0,0 +1,193 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+func (r *PublicationReconciler) alignPublication(ctx context.Context, obj *apiv1.Publication) error {
+ db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName)
+ if err != nil {
+ return fmt.Errorf("while getting DB connection: %w", err)
+ }
+
+ row := db.QueryRowContext(
+ ctx,
+ `
+ SELECT count(*)
+ FROM pg_publication
+ WHERE pubname = $1
+ `,
+ obj.Spec.Name)
+ if row.Err() != nil {
+ return fmt.Errorf("while getting publication status: %w", row.Err())
+ }
+
+ var count int
+ if err := row.Scan(&count); err != nil {
+ return fmt.Errorf("while getting publication status (scan): %w", err)
+ }
+
+ if count > 0 {
+ if err := r.patchPublication(ctx, db, obj); err != nil {
+ return fmt.Errorf("while patching publication: %w", err)
+ }
+ return nil
+ }
+
+ if err := r.createPublication(ctx, db, obj); err != nil {
+ return fmt.Errorf("while creating publication: %w", err)
+ }
+
+ return nil
+}
+
+func (r *PublicationReconciler) patchPublication(
+ ctx context.Context,
+ db *sql.DB,
+ obj *apiv1.Publication,
+) error {
+ sqls := toPublicationAlterSQL(obj)
+ for _, sqlQuery := range sqls {
+ if _, err := db.ExecContext(ctx, sqlQuery); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *PublicationReconciler) createPublication(
+ ctx context.Context,
+ db *sql.DB,
+ obj *apiv1.Publication,
+) error {
+ sqlQuery := toPublicationCreateSQL(obj)
+ _, err := db.ExecContext(ctx, sqlQuery)
+ return err
+}
+
+func toPublicationCreateSQL(obj *apiv1.Publication) string {
+ createQuery := fmt.Sprintf(
+ "CREATE PUBLICATION %s %s",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ toPublicationTargetSQL(&obj.Spec.Target),
+ )
+ if len(obj.Spec.Parameters) > 0 {
+ createQuery = fmt.Sprintf("%s WITH (%s)", createQuery, toPostgresParameters(obj.Spec.Parameters))
+ }
+
+ return createQuery
+}
+
+func toPublicationAlterSQL(obj *apiv1.Publication) []string {
+ result := make([]string, 0, 2)
+
+ if len(obj.Spec.Target.Objects) > 0 {
+ result = append(result,
+ fmt.Sprintf(
+ "ALTER PUBLICATION %s SET %s",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ toPublicationTargetObjectsSQL(&obj.Spec.Target),
+ ),
+ )
+ }
+
+ if len(obj.Spec.Parameters) > 0 {
+ result = append(result,
+ fmt.Sprintf(
+ "ALTER PUBLICATION %s SET (%s)",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ toPostgresParameters(obj.Spec.Parameters),
+ ),
+ )
+ }
+
+ return result
+}
+
+func executeDropPublication(ctx context.Context, db *sql.DB, name string) error {
+ if _, err := db.ExecContext(
+ ctx,
+ fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{name}.Sanitize()),
+ ); err != nil {
+ return fmt.Errorf("while dropping publication: %w", err)
+ }
+
+ return nil
+}
+
+func toPublicationTargetSQL(obj *apiv1.PublicationTarget) string {
+ if obj.AllTables {
+ return "FOR ALL TABLES"
+ }
+
+ result := toPublicationTargetObjectsSQL(obj)
+ if len(result) > 0 {
+ result = fmt.Sprintf("FOR %s", result)
+ }
+ return result
+}
+
+func toPublicationTargetObjectsSQL(obj *apiv1.PublicationTarget) string {
+ result := ""
+ for _, object := range obj.Objects {
+ if len(result) > 0 {
+ result += ", "
+ }
+ result += toPublicationObjectSQL(&object)
+ }
+
+ return result
+}
+
+func toPublicationObjectSQL(obj *apiv1.PublicationTargetObject) string {
+ if len(obj.TablesInSchema) > 0 {
+ return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.TablesInSchema}.Sanitize())
+ }
+
+ result := strings.Builder{}
+ result.WriteString("TABLE ")
+
+ if obj.Table.Only {
+ result.WriteString("ONLY ")
+ }
+
+ if len(obj.Table.Schema) > 0 {
+ result.WriteString(fmt.Sprintf("%s.", pgx.Identifier{obj.Table.Schema}.Sanitize()))
+ }
+
+ result.WriteString(pgx.Identifier{obj.Table.Name}.Sanitize())
+
+ if len(obj.Table.Columns) > 0 {
+ sanitizedColumns := make([]string, 0, len(obj.Table.Columns))
+ for _, column := range obj.Table.Columns {
+ sanitizedColumns = append(sanitizedColumns, pgx.Identifier{column}.Sanitize())
+ }
+ result.WriteString(fmt.Sprintf(" (%s)", strings.Join(sanitizedColumns, ", ")))
+ }
+
+ return result.String()
+}
diff --git a/internal/management/controller/publication_controller_sql_test.go b/internal/management/controller/publication_controller_sql_test.go
new file mode 100644
index 0000000000..b993b93576
--- /dev/null
+++ b/internal/management/controller/publication_controller_sql_test.go
@@ -0,0 +1,225 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// nolint: dupl
+package controller
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/jackc/pgx/v5"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("publication sql", func() {
+ var (
+ dbMock sqlmock.Sqlmock
+ db *sql.DB
+ )
+
+ BeforeEach(func() {
+ var err error
+ db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+ })
+
+ It("drops the publication successfully", func(ctx SpecContext) {
+ dbMock.ExpectExec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{"publication_name"}.Sanitize())).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := executeDropPublication(ctx, db, "publication_name")
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("returns an error when dropping the publication fails", func(ctx SpecContext) {
+ dbMock.ExpectExec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s",
+ pgx.Identifier{"publication_name"}.Sanitize())).
+ WillReturnError(fmt.Errorf("drop publication error"))
+
+ err := executeDropPublication(ctx, db, "publication_name")
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("while dropping publication: drop publication error"))
+ })
+
+ It("sanitizes the publication name correctly", func(ctx SpecContext) {
+ dbMock.ExpectExec(
+ fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{"sanitized_name"}.Sanitize())).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := executeDropPublication(ctx, db, "sanitized_name")
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("generates correct SQL for altering publication with target objects", func() {
+ obj := &apiv1.Publication{
+ Spec: apiv1.PublicationSpec{
+ Name: "test_pub",
+ Target: apiv1.PublicationTarget{
+ Objects: []apiv1.PublicationTargetObject{
+ {TablesInSchema: "public"},
+ },
+ },
+ },
+ }
+
+ sqls := toPublicationAlterSQL(obj)
+ Expect(sqls).To(ContainElement(`ALTER PUBLICATION "test_pub" SET TABLES IN SCHEMA "public"`))
+ })
+
+ It("generates correct SQL for altering publication with parameters", func() {
+ obj := &apiv1.Publication{
+ Spec: apiv1.PublicationSpec{
+ Name: "test_pub",
+ Parameters: map[string]string{
+ "param1": "value1",
+ "param2": "value2",
+ },
+ },
+ }
+
+ sqls := toPublicationAlterSQL(obj)
+ Expect(sqls).To(ContainElement(`ALTER PUBLICATION "test_pub" SET ("param1" = 'value1', "param2" = 'value2')`))
+ })
+
+ It("returns empty SQL list when no alterations are needed", func() {
+ obj := &apiv1.Publication{
+ Spec: apiv1.PublicationSpec{
+ Name: "test_pub",
+ },
+ }
+
+ sqls := toPublicationAlterSQL(obj)
+ Expect(sqls).To(BeEmpty())
+ })
+
+ It("generates correct SQL for creating publication with target schema", func() {
+ obj := &apiv1.Publication{
+ Spec: apiv1.PublicationSpec{
+ Name: "test_pub",
+ Target: apiv1.PublicationTarget{
+ Objects: []apiv1.PublicationTargetObject{
+ {TablesInSchema: "public"},
+ },
+ },
+ },
+ }
+
+ sql := toPublicationCreateSQL(obj)
+ Expect(sql).To(Equal(`CREATE PUBLICATION "test_pub" FOR TABLES IN SCHEMA "public"`))
+ })
+
+ It("generates correct SQL for creating publication with target table", func() {
+ obj := &apiv1.Publication{
+ Spec: apiv1.PublicationSpec{
+ Name: "test_pub",
+ Target: apiv1.PublicationTarget{
+ Objects: []apiv1.PublicationTargetObject{
+ {Table: &apiv1.PublicationTargetTable{Name: "table", Schema: "test", Columns: []string{"a", "b"}}},
+ },
+ },
+ },
+ }
+
+ sql := toPublicationCreateSQL(obj)
+ Expect(sql).To(Equal(`CREATE PUBLICATION "test_pub" FOR TABLE "test"."table" ("a", "b")`))
+ })
+
+ It("generates correct SQL for creating publication with parameters", func() {
+ obj := &apiv1.Publication{
+ Spec: apiv1.PublicationSpec{
+ Name: "test_pub",
+ Parameters: map[string]string{
+ "param1": "value1",
+ "param2": "value2",
+ },
+ Target: apiv1.PublicationTarget{
+ Objects: []apiv1.PublicationTargetObject{
+ {TablesInSchema: "public"},
+ },
+ },
+ },
+ }
+
+ sql := toPublicationCreateSQL(obj)
+ Expect(sql).To(Equal(
+ `CREATE PUBLICATION "test_pub" FOR TABLES IN SCHEMA "public" WITH ("param1" = 'value1', "param2" = 'value2')`,
+ ))
+ })
+})
+
+var _ = Describe("toPublicationObjectSQL", func() {
+ It("returns correct SQL for tables in schema", func() {
+ obj := &apiv1.PublicationTargetObject{
+ TablesInSchema: "public",
+ }
+ result := toPublicationObjectSQL(obj)
+ Expect(result).To(Equal(`TABLES IN SCHEMA "public"`))
+ })
+
+ It("returns correct SQL for table with schema and columns", func() {
+ obj := &apiv1.PublicationTargetObject{
+ Table: &apiv1.PublicationTargetTable{
+ Name: "table",
+ Schema: "test",
+ Columns: []string{"a", "b"},
+ },
+ }
+ result := toPublicationObjectSQL(obj)
+ Expect(result).To(Equal(`TABLE "test"."table" ("a", "b")`))
+ })
+
+ It("returns correct SQL for table with only clause", func() {
+ obj := &apiv1.PublicationTargetObject{
+ Table: &apiv1.PublicationTargetTable{
+ Name: "table",
+ Only: true,
+ },
+ }
+ result := toPublicationObjectSQL(obj)
+ Expect(result).To(Equal(`TABLE ONLY "table"`))
+ })
+
+ It("returns correct SQL for table without schema and columns", func() {
+ obj := &apiv1.PublicationTargetObject{
+ Table: &apiv1.PublicationTargetTable{
+ Name: "table",
+ },
+ }
+ result := toPublicationObjectSQL(obj)
+ Expect(result).To(Equal(`TABLE "table"`))
+ })
+
+ It("returns correct SQL for table with schema but without columns", func() {
+ obj := &apiv1.PublicationTargetObject{
+ Table: &apiv1.PublicationTargetTable{
+ Name: "table",
+ Schema: "test",
+ },
+ }
+ result := toPublicationObjectSQL(obj)
+ Expect(result).To(Equal(`TABLE "test"."table"`))
+ })
+})
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go
new file mode 100644
index 0000000000..f1a3af65bf
--- /dev/null
+++ b/internal/management/controller/subscription_controller.go
@@ -0,0 +1,195 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// SubscriptionReconciler reconciles a Subscription object
+type SubscriptionReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+
+ instance *postgres.Instance
+ finalizerReconciler *finalizerReconciler[*apiv1.Subscription]
+}
+
+// subscriptionReconciliationInterval is the interval between two
+// consecutive reconciliation attempts for a subscription
+const subscriptionReconciliationInterval = 30 * time.Second
+
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=subscriptions,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=subscriptions/status,verbs=get;update;patch
+
+// Reconcile is the subscription reconciliation loop
+func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ contextLogger := log.FromContext(ctx)
+
+ contextLogger.Debug("Reconciliation loop start")
+ defer func() {
+ contextLogger.Debug("Reconciliation loop end")
+ }()
+
+ // Get the subscription object
+ var subscription apiv1.Subscription
+ if err := r.Client.Get(ctx, client.ObjectKey{
+ Namespace: req.Namespace,
+ Name: req.Name,
+ }, &subscription); err != nil {
+ contextLogger.Trace("Could not fetch Subscription", "error", err)
+ return ctrl.Result{}, client.IgnoreNotFound(err)
+ }
+
+ // This is not for me!
+ if subscription.Spec.ClusterRef.Name != r.instance.GetClusterName() {
+ contextLogger.Trace("Subscription is not for this cluster",
+ "cluster", subscription.Spec.ClusterRef.Name,
+ "expected", r.instance.GetClusterName(),
+ )
+ return ctrl.Result{}, nil
+ }
+
+ // If everything is reconciled, we're done here
+ if subscription.Generation == subscription.Status.ObservedGeneration {
+ return ctrl.Result{}, nil
+ }
+
+ // Fetch the Cluster from the cache
+ cluster, err := r.GetCluster(ctx)
+ if err != nil {
+ return ctrl.Result{}, markAsFailed(ctx, r.Client, &subscription, fmt.Errorf("while fetching the cluster: %w", err))
+ }
+
+ // Still not for me, we're waiting for a switchover
+ if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary {
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ // This is not for me, at least now
+ if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ // Cannot do anything on a replica cluster
+ if cluster.IsReplica() {
+ if err := markAsUnknown(ctx, r.Client, &subscription, errClusterIsReplica); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ if err := r.finalizerReconciler.reconcile(ctx, &subscription); err != nil {
+ return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err)
+ }
+ if !subscription.GetDeletionTimestamp().IsZero() {
+ return ctrl.Result{}, nil
+ }
+
+ // Let's get the connection string
+ connString, err := getSubscriptionConnectionString(
+ cluster,
+ subscription.Spec.ExternalClusterName,
+ subscription.Spec.PublicationDBName,
+ )
+ if err != nil {
+ if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ if err := r.alignSubscription(ctx, &subscription, connString); err != nil {
+ if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+ }
+
+ if err := markAsReady(ctx, r.Client, &subscription); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
+}
+
+func (r *SubscriptionReconciler) evaluateDropSubscription(ctx context.Context, sub *apiv1.Subscription) error {
+ if sub.Spec.ReclaimPolicy != apiv1.SubscriptionReclaimDelete {
+ return nil
+ }
+
+ db, err := r.instance.ConnectionPool().Connection(sub.Spec.DBName)
+ if err != nil {
+ return fmt.Errorf("while getting DB connection: %w", err)
+ }
+ return executeDropSubscription(ctx, db, sub.Spec.Name)
+}
+
+// NewSubscriptionReconciler creates a new subscription reconciler
+func NewSubscriptionReconciler(
+ mgr manager.Manager,
+ instance *postgres.Instance,
+) *SubscriptionReconciler {
+ sr := &SubscriptionReconciler{Client: mgr.GetClient(), instance: instance}
+ sr.finalizerReconciler = newFinalizerReconciler(
+ mgr.GetClient(),
+ utils.SubscriptionFinalizerName,
+ sr.evaluateDropSubscription,
+ )
+
+ return sr
+}
+
+// SetupWithManager sets up the controller with the Manager
+func (r *SubscriptionReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&apiv1.Subscription{}).
+ Named("instance-subscription").
+ Complete(r)
+}
+
+// GetCluster gets the managed cluster through the client
+func (r *SubscriptionReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
+ return getClusterFromInstance(ctx, r.Client, r.instance)
+}
+
+// getSubscriptionConnectionString returns the connection string used to reach
+// the specified external cluster from a pod of the given cluster
+func getSubscriptionConnectionString(
+ cluster *apiv1.Cluster,
+ externalClusterName string,
+ databaseName string,
+) (string, error) {
+ externalCluster, ok := cluster.ExternalCluster(externalClusterName)
+ if !ok {
+ return "", fmt.Errorf("externalCluster '%s' not declared in cluster %s", externalClusterName, cluster.Name)
+ }
+
+ return external.GetServerConnectionString(&externalCluster, databaseName), nil
+}
diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go
new file mode 100644
index 0000000000..47f9f945df
--- /dev/null
+++ b/internal/management/controller/subscription_controller_sql.go
@@ -0,0 +1,150 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/lib/pq"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+func (r *SubscriptionReconciler) alignSubscription(
+ ctx context.Context,
+ obj *apiv1.Subscription,
+ connString string,
+) error {
+ db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName)
+ if err != nil {
+ return fmt.Errorf("while getting DB connection: %w", err)
+ }
+
+ row := db.QueryRowContext(
+ ctx,
+ `
+ SELECT count(*)
+ FROM pg_subscription
+ WHERE subname = $1
+ `,
+ obj.Spec.Name)
+ if row.Err() != nil {
+ return fmt.Errorf("while getting subscription status: %w", row.Err())
+ }
+
+ var count int
+ if err := row.Scan(&count); err != nil {
+ return fmt.Errorf("while getting subscription status (scan): %w", err)
+ }
+
+ if count > 0 {
+ if err := r.patchSubscription(ctx, db, obj, connString); err != nil {
+ return fmt.Errorf("while patching subscription: %w", err)
+ }
+ return nil
+ }
+
+ if err := r.createSubscription(ctx, db, obj, connString); err != nil {
+ return fmt.Errorf("while creating subscription: %w", err)
+ }
+
+ return nil
+}
+
+func (r *SubscriptionReconciler) patchSubscription(
+ ctx context.Context,
+ db *sql.DB,
+ obj *apiv1.Subscription,
+ connString string,
+) error {
+ sqls := toSubscriptionAlterSQL(obj, connString)
+ for _, sqlQuery := range sqls {
+ if _, err := db.ExecContext(ctx, sqlQuery); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *SubscriptionReconciler) createSubscription(
+ ctx context.Context,
+ db *sql.DB,
+ obj *apiv1.Subscription,
+ connString string,
+) error {
+ sqlQuery := toSubscriptionCreateSQL(obj, connString)
+ _, err := db.ExecContext(ctx, sqlQuery)
+ return err
+}
+
+func toSubscriptionCreateSQL(obj *apiv1.Subscription, connString string) string {
+ createQuery := fmt.Sprintf(
+ "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ pq.QuoteLiteral(connString),
+ pgx.Identifier{obj.Spec.PublicationName}.Sanitize(),
+ )
+ if len(obj.Spec.Parameters) > 0 {
+ createQuery = fmt.Sprintf("%s WITH (%s)", createQuery, toPostgresParameters(obj.Spec.Parameters))
+ }
+
+ return createQuery
+}
+
+func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string) []string {
+ result := make([]string, 0, 3)
+
+ setPublicationSQL := fmt.Sprintf(
+ "ALTER SUBSCRIPTION %s SET PUBLICATION %s",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ pgx.Identifier{obj.Spec.PublicationName}.Sanitize(),
+ )
+
+ setConnStringSQL := fmt.Sprintf(
+ "ALTER SUBSCRIPTION %s CONNECTION %s",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ pq.QuoteLiteral(connString),
+ )
+ result = append(result, setPublicationSQL, setConnStringSQL)
+
+ if len(obj.Spec.Parameters) > 0 {
+ result = append(result,
+ fmt.Sprintf(
+ "ALTER SUBSCRIPTION %s SET (%s)",
+ pgx.Identifier{obj.Spec.Name}.Sanitize(),
+ toPostgresParameters(obj.Spec.Parameters),
+ ),
+ )
+ }
+
+ return result
+}
+
+func executeDropSubscription(ctx context.Context, db *sql.DB, name string) error {
+ if _, err := db.ExecContext(
+ ctx,
+ fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{name}.Sanitize()),
+ ); err != nil {
+ return fmt.Errorf("while dropping subscription: %w", err)
+ }
+
+ return nil
+}
diff --git a/internal/management/controller/subscription_controller_sql_test.go b/internal/management/controller/subscription_controller_sql_test.go
new file mode 100644
index 0000000000..8afe3019f6
--- /dev/null
+++ b/internal/management/controller/subscription_controller_sql_test.go
@@ -0,0 +1,169 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// nolint: dupl
+package controller
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/jackc/pgx/v5"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+// nolint: dupl
+var _ = Describe("subscription sql", func() {
+ var (
+ dbMock sqlmock.Sqlmock
+ db *sql.DB
+ )
+
+ BeforeEach(func() {
+ var err error
+ db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+ })
+
+ It("drops the subscription successfully", func(ctx SpecContext) {
+ dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"subscription_name"}.Sanitize())).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := executeDropSubscription(ctx, db, "subscription_name")
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("returns an error when dropping the subscription fails", func(ctx SpecContext) {
+ dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"subscription_name"}.Sanitize())).
+ WillReturnError(fmt.Errorf("drop subscription error"))
+
+ err := executeDropSubscription(ctx, db, "subscription_name")
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("while dropping subscription: drop subscription error"))
+ })
+
+ It("sanitizes the subscription name correctly", func(ctx SpecContext) {
+ dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"sanitized_name"}.Sanitize())).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ err := executeDropSubscription(ctx, db, "sanitized_name")
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("generates correct SQL for creating subscription with publication and connection string", func() {
+ obj := &apiv1.Subscription{
+ Spec: apiv1.SubscriptionSpec{
+ Name: "test_sub",
+ PublicationName: "test_pub",
+ },
+ }
+ connString := "host=localhost user=test dbname=test"
+
+ sql := toSubscriptionCreateSQL(obj, connString)
+ Expect(sql).To(Equal(
+ `CREATE SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test' PUBLICATION "test_pub"`))
+ })
+
+ It("generates correct SQL for creating subscription with parameters", func() {
+ obj := &apiv1.Subscription{
+ Spec: apiv1.SubscriptionSpec{
+ Name: "test_sub",
+ PublicationName: "test_pub",
+ Parameters: map[string]string{
+ "param1": "value1",
+ "param2": "value2",
+ },
+ },
+ }
+ connString := "host=localhost user=test dbname=test"
+
+ sql := toSubscriptionCreateSQL(obj, connString)
+ expectedElement := `CREATE SUBSCRIPTION "test_sub" ` +
+ `CONNECTION 'host=localhost user=test dbname=test' ` +
+ `PUBLICATION "test_pub" WITH ("param1" = 'value1', "param2" = 'value2')`
+ Expect(sql).To(Equal(expectedElement))
+ })
+
+ It("returns correct SQL for creating subscription with no owner or parameters", func() {
+ obj := &apiv1.Subscription{
+ Spec: apiv1.SubscriptionSpec{
+ Name: "test_sub",
+ PublicationName: "test_pub",
+ },
+ }
+ connString := "host=localhost user=test dbname=test"
+
+ sql := toSubscriptionCreateSQL(obj, connString)
+ Expect(sql).To(Equal(
+ `CREATE SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test' PUBLICATION "test_pub"`))
+ })
+
+ It("generates correct SQL for altering subscription with publication and connection string", func() {
+ obj := &apiv1.Subscription{
+ Spec: apiv1.SubscriptionSpec{
+ Name: "test_sub",
+ PublicationName: "test_pub",
+ },
+ }
+ connString := "host=localhost user=test dbname=test"
+
+ sqls := toSubscriptionAlterSQL(obj, connString)
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`))
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`))
+ })
+
+ It("generates correct SQL for altering subscription with parameters", func() {
+ obj := &apiv1.Subscription{
+ Spec: apiv1.SubscriptionSpec{
+ Name: "test_sub",
+ PublicationName: "test_pub",
+ Parameters: map[string]string{
+ "param1": "value1",
+ "param2": "value2",
+ },
+ },
+ }
+ connString := "host=localhost user=test dbname=test"
+
+ sqls := toSubscriptionAlterSQL(obj, connString)
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`))
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`))
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET ("param1" = 'value1', "param2" = 'value2')`))
+ })
+
+ It("returns correct SQL for altering subscription with no owner or parameters", func() {
+ obj := &apiv1.Subscription{
+ Spec: apiv1.SubscriptionSpec{
+ Name: "test_sub",
+ PublicationName: "test_pub",
+ },
+ }
+ connString := "host=localhost user=test dbname=test"
+
+ sqls := toSubscriptionAlterSQL(obj, connString)
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`))
+ Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`))
+ })
+})
diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go
new file mode 100644
index 0000000000..901ab93f61
--- /dev/null
+++ b/internal/management/controller/subscription_controller_test.go
@@ -0,0 +1,32 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Conversion of PG parameters from map to string of key/value pairs", func() {
+ It("returns expected well-formed list", func() {
+ m := map[string]string{
+ "a": "1", "b": "2",
+ }
+ res := toPostgresParameters(m)
+ Expect(res).To(Equal(`"a" = '1', "b" = '2'`))
+ })
+})
diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go
index f0d9bf4cb1..ac328cc66e 100644
--- a/pkg/specs/roles.go
+++ b/pkg/specs/roles.go
@@ -154,6 +154,62 @@ func CreateRole(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) rbacv1.Role {
"update",
},
},
+ {
+ APIGroups: []string{
+ "postgresql.cnpg.io",
+ },
+ Resources: []string{
+ "publications",
+ },
+ Verbs: []string{
+ "get",
+ "update",
+ "list",
+ "watch",
+ },
+ },
+ {
+ APIGroups: []string{
+ "postgresql.cnpg.io",
+ },
+ Resources: []string{
+ "publications/status",
+ },
+ Verbs: []string{
+ "get",
+ "patch",
+ "update",
+ },
+ },
+ {
+ APIGroups: []string{
+ "postgresql.cnpg.io",
+ },
+ Resources: []string{
+ "subscriptions",
+ },
+ Verbs: []string{
+ "get",
+ "update",
+ "list",
+ "watch",
+ },
+ },
+ {
+ APIGroups: []string{
+ "postgresql.cnpg.io",
+ },
+ Resources: []string{
+ "subscriptions/status",
+ },
+ Verbs: []string{
+ "get",
+ "patch",
+ "update",
+ },
+ },
}
return rbacv1.Role{
diff --git a/pkg/specs/roles_test.go b/pkg/specs/roles_test.go
index 3753a66154..0d3df97d28 100644
--- a/pkg/specs/roles_test.go
+++ b/pkg/specs/roles_test.go
@@ -165,7 +165,7 @@ var _ = Describe("Roles", func() {
serviceAccount := CreateRole(cluster, nil)
Expect(serviceAccount.Name).To(Equal(cluster.Name))
Expect(serviceAccount.Namespace).To(Equal(cluster.Namespace))
- Expect(serviceAccount.Rules).To(HaveLen(9))
+ Expect(serviceAccount.Rules).To(HaveLen(13))
})
It("should contain every secret of the origin backup and backup configuration of every external cluster", func() {
diff --git a/pkg/utils/finalizers.go b/pkg/utils/finalizers.go
index 81d958df6d..ba9ed64f16 100644
--- a/pkg/utils/finalizers.go
+++ b/pkg/utils/finalizers.go
@@ -20,4 +20,12 @@ const (
// DatabaseFinalizerName is the name of the finalizer
// triggering the deletion of the database
DatabaseFinalizerName = MetadataNamespace + "/deleteDatabase"
+
+ // PublicationFinalizerName is the name of the finalizer
+ // triggering the deletion of the publication
+ PublicationFinalizerName = MetadataNamespace + "/deletePublication"
+
+ // SubscriptionFinalizerName is the name of the finalizer
+ // triggering the deletion of the subscription
+ SubscriptionFinalizerName = MetadataNamespace + "/deleteSubscription"
)
diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template b/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template
new file mode 100644
index 0000000000..1597981714
--- /dev/null
+++ b/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template
@@ -0,0 +1,48 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: destination-cluster
+spec:
+ instances: 1
+ externalClusters:
+ - name: source-cluster
+ connectionParameters:
+ host: source-cluster-rw
+ user: app
+ dbname: declarative
+ port: "5432"
+ password:
+ name: source-cluster-app
+ key: password
+
+ postgresql:
+ parameters:
+ max_connections: "110"
+ log_checkpoints: "on"
+ log_lock_waits: "on"
+ log_min_duration_statement: '1000'
+ log_statement: 'ddl'
+ log_temp_files: '1024'
+ log_autovacuum_min_duration: '1s'
+ log_replication_commands: 'on'
+
+ # Example of rolling update strategy:
+ # - unsupervised: automated update of the primary once all
+ # replicas have been upgraded (default)
+ # - supervised: requires manual supervision to perform
+ # the switchover of the primary
+ primaryUpdateStrategy: unsupervised
+ primaryUpdateMethod: switchover
+
+ bootstrap:
+ initdb:
+ database: app
+ owner: app
+
+ # Persistent storage configuration
+ storage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
+ walStorage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml
new file mode 100644
index 0000000000..2a6e122647
--- /dev/null
+++ b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml
@@ -0,0 +1,9 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+ name: destination-db-declarative
+spec:
+ name: declarative
+ owner: app
+ cluster:
+ name: destination-cluster
diff --git a/tests/e2e/fixtures/declarative_pub_sub/pub.yaml b/tests/e2e/fixtures/declarative_pub_sub/pub.yaml
new file mode 100644
index 0000000000..bd09d64014
--- /dev/null
+++ b/tests/e2e/fixtures/declarative_pub_sub/pub.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+ name: publication-declarative
+spec:
+ name: pub
+ dbname: declarative
+ cluster:
+ name: source-cluster
+ target:
+ allTables: true
diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template b/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template
new file mode 100644
index 0000000000..398a6613c8
--- /dev/null
+++ b/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template
@@ -0,0 +1,48 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: source-cluster
+spec:
+ instances: 1
+
+ postgresql:
+ parameters:
+ max_connections: "110"
+ log_checkpoints: "on"
+ log_lock_waits: "on"
+ log_min_duration_statement: '1000'
+ log_statement: 'ddl'
+ log_temp_files: '1024'
+ log_autovacuum_min_duration: '1s'
+ log_replication_commands: 'on'
+ pg_hba:
+ - hostssl replication app all scram-sha-256
+
+ managed:
+ roles:
+ - name: app
+ ensure: present
+ login: true
+ replication: true
+
+ # Example of rolling update strategy:
+ # - unsupervised: automated update of the primary once all
+ # replicas have been upgraded (default)
+ # - supervised: requires manual supervision to perform
+ # the switchover of the primary
+ primaryUpdateStrategy: unsupervised
+ primaryUpdateMethod: switchover
+
+ bootstrap:
+ initdb:
+ database: app
+ owner: app
+
+ # Persistent storage configuration
+ storage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
+ walStorage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml
new file mode 100644
index 0000000000..80d5a4cf27
--- /dev/null
+++ b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml
@@ -0,0 +1,9 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+ name: source-db-declarative
+spec:
+ name: declarative
+ owner: app
+ cluster:
+ name: source-cluster
diff --git a/tests/e2e/fixtures/declarative_pub_sub/sub.yaml b/tests/e2e/fixtures/declarative_pub_sub/sub.yaml
new file mode 100644
index 0000000000..8eb5aabdc4
--- /dev/null
+++ b/tests/e2e/fixtures/declarative_pub_sub/sub.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+ name: subscription-declarative
+spec:
+ name: sub
+ dbname: declarative
+ publicationName: pub
+ cluster:
+ name: destination-cluster
+ externalClusterName: source-cluster
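
For reference, this fixture maps onto the SQL helpers introduced earlier in
this patch. A minimal sketch of what the reconciler would generate for this
spec (the connection string below is illustrative; in the operator it is
computed by getSubscriptionConnectionString from the "source-cluster"
externalClusters entry of the destination cluster manifest):

	sub := &apiv1.Subscription{
		Spec: apiv1.SubscriptionSpec{
			Name:            "sub",
			PublicationName: "pub",
		},
	}
	// Illustrative value, normally derived from the externalClusters entry
	connString := "host=source-cluster-rw user=app dbname=declarative port=5432"
	fmt.Println(toSubscriptionCreateSQL(sub, connString))
	// Output:
	// CREATE SUBSCRIPTION "sub" CONNECTION 'host=source-cluster-rw user=app dbname=declarative port=5432' PUBLICATION "pub"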
diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go
new file mode 100644
index 0000000000..3133bd3ef7
--- /dev/null
+++ b/tests/e2e/publication_subscription_test.go
@@ -0,0 +1,236 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "time"
+
+ "k8s.io/apimachinery/pkg/types"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests"
+ testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+// Set of tests in which we spin up a source and a destination cluster and
+// apply the declarative publication and subscription CRDs to them
+var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePubSub), func() {
+ const (
+ sourceClusterManifest = fixturesDir + "/declarative_pub_sub/source-cluster.yaml.template"
+ destinationClusterManifest = fixturesDir + "/declarative_pub_sub/destination-cluster.yaml.template"
+ sourceDatabaseManifest = fixturesDir + "/declarative_pub_sub/source-database.yaml"
+ destinationDatabaseManifest = fixturesDir + "/declarative_pub_sub/destination-database.yaml"
+ pubManifest = fixturesDir + "/declarative_pub_sub/pub.yaml"
+ subManifest = fixturesDir + "/declarative_pub_sub/sub.yaml"
+ level = tests.Medium
+ )
+
+ BeforeEach(func() {
+ if testLevelEnv.Depth < int(level) {
+ Skip("Test depth is lower than the amount requested for this test")
+ }
+ })
+
+ Context("in a plain vanilla cluster", Ordered, func() {
+ const (
+ namespacePrefix = "declarative-pub-sub"
+ dbname = "declarative"
+ tableName = "test"
+ )
+ var (
+ sourceClusterName, destinationClusterName, namespace string
+ databaseObjectName, pubObjectName, subObjectName string
+ pub *apiv1.Publication
+ sub *apiv1.Subscription
+ err error
+ )
+
+ BeforeAll(func() {
+ // Create a cluster in a namespace we'll delete after the test
+ namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ Expect(err).ToNot(HaveOccurred())
+
+ sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterManifest)
+ Expect(err).ToNot(HaveOccurred())
+
+ destinationClusterName, err = env.GetResourceNameFromYAML(destinationClusterManifest)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("setting up source cluster", func() {
+ AssertCreateCluster(namespace, sourceClusterName, sourceClusterManifest, env)
+ })
+
+ By("setting up destination cluster", func() {
+ AssertCreateCluster(namespace, destinationClusterName, destinationClusterManifest, env)
+ })
+ })
+
+ assertCreateDatabase := func(namespace, clusterName, databaseManifest, databaseName string) {
+ databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest)
+ Expect(err).NotTo(HaveOccurred())
+
+ By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() {
+ CreateResourceFromFile(namespace, databaseManifest)
+ })
+
+ By(fmt.Sprintf("ensuring the %s Database CRD succeeded reconciliation", databaseObjectName), func() {
+ databaseObject := &apiv1.Database{}
+ databaseNamespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: databaseObjectName,
+ }
+
+ Eventually(func(g Gomega) {
+ err := env.Client.Get(env.Ctx, databaseNamespacedName, databaseObject)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(databaseObject.Status.Applied).Should(HaveValue(BeTrue()))
+ }, 300).WithPolling(10 * time.Second).Should(Succeed())
+ })
+
+ By(fmt.Sprintf("verifying the %s database has been created", databaseName), func() {
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ AssertDatabaseExists(primaryPodInfo, databaseName, true)
+ })
+ }
+
+ assertPublicationExists := func(namespace, primaryPod string, pub *apiv1.Publication) {
+ query := fmt.Sprintf("select count(*) from pg_publication where pubname = '%s'",
+ pub.Spec.Name)
+ Eventually(func(g Gomega) {
+ stdout, _, err := env.ExecQueryInInstancePod(
+ testUtils.PodLocator{
+ Namespace: namespace,
+ PodName: primaryPod,
+ },
+ dbname,
+ query)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(stdout).Should(ContainSubstring("1"), "expected publication not found")
+ }, 30).Should(Succeed())
+ }
+
+ assertSubscriptionExists := func(namespace, primaryPod string, sub *apiv1.Subscription) {
+ query := fmt.Sprintf("select count(*) from pg_subscription where subname = '%s'",
+ sub.Spec.Name)
+ Eventually(func(g Gomega) {
+ stdout, _, err := env.ExecQueryInInstancePod(
+ testUtils.PodLocator{
+ Namespace: namespace,
+ PodName: primaryPod,
+ },
+ dbname,
+ query)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(stdout).Should(ContainSubstring("1"), "expected subscription not found")
+ }, 30).Should(Succeed())
+ }
+
+ It("can perform logical replication", func() {
+ assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest, dbname)
+
+ tableLocator := TableLocator{
+ Namespace: namespace,
+ ClusterName: sourceClusterName,
+ DatabaseName: dbname,
+ TableName: tableName,
+ }
+ AssertCreateTestData(env, tableLocator)
+
+ assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest, dbname)
+
+ By("creating an empty table inside the destination database", func() {
+ query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName)
+ _, err = testUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname,
+ apiv1.ApplicationUserSecretSuffix, query)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ By("applying Publication CRD manifest", func() {
+ CreateResourceFromFile(namespace, pubManifest)
+ pubObjectName, err = env.GetResourceNameFromYAML(pubManifest)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ By("ensuring the Publication CRD succeeded reconciliation", func() {
+ // get publication object
+ pub = &apiv1.Publication{}
+ pubNamespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: pubObjectName,
+ }
+
+ Eventually(func(g Gomega) {
+ err := env.Client.Get(env.Ctx, pubNamespacedName, pub)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(pub.Status.Applied).Should(HaveValue(BeTrue()))
+ }, 300).WithPolling(10 * time.Second).Should(Succeed())
+ })
+
+ By("verifying new publication has been created", func() {
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ assertPublicationExists(namespace, primaryPodInfo.Name, pub)
+ })
+
+ By("applying Subscription CRD manifest", func() {
+ CreateResourceFromFile(namespace, subManifest)
+ subObjectName, err = env.GetResourceNameFromYAML(subManifest)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ By("ensuring the Subscription CRD succeeded reconciliation", func() {
+ // get subscription object
+ sub = &apiv1.Subscription{}
+ subNamespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: subObjectName,
+ }
+
+ Eventually(func(g Gomega) {
+ err := env.Client.Get(env.Ctx, subNamespacedName, sub)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(sub.Status.Applied).Should(HaveValue(BeTrue()))
+ }, 300).WithPolling(10 * time.Second).Should(Succeed())
+ })
+
+ By("verifying new subscription has been created", func() {
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ assertSubscriptionExists(namespace, primaryPodInfo.Name, sub)
+ })
+
+ By("checking that the data is present inside the destination cluster database", func() {
+ tableLocator := TableLocator{
+ Namespace: namespace,
+ ClusterName: destinationClusterName,
+ DatabaseName: dbname,
+ TableName: tableName,
+ }
+ AssertDataExpectedCount(env, tableLocator, 2)
+ })
+ })
+ })
+})
diff --git a/tests/labels.go b/tests/labels.go
index 25b2b858b5..98649f2be2 100644
--- a/tests/labels.go
+++ b/tests/labels.go
@@ -32,6 +32,9 @@ const (
// LabelDeclarativeDatabases is a label for selecting the declarative databases test
LabelDeclarativeDatabases = "declarative-databases"
+ // LabelDeclarativePubSub is a label for selecting the publication / subscription test
+ LabelDeclarativePubSub = "publication-subscription"
+
// LabelDisruptive is the string for labelling disruptive tests
LabelDisruptive = "disruptive"
From 79b10310abc39ca5eb6f93358e8aa6805ba473d9 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 27 Nov 2024 19:04:54 +0100
Subject: [PATCH 170/836] chore(deps): update
xt0rted/pull-request-comment-branch action to v3 (main) (#6190)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 3f7d561e15..04c3cd31ef 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -145,7 +145,7 @@ jobs:
echo "LOG_LEVEL=${LOG_LEVEL}" >> $GITHUB_ENV
- name: Resolve Git reference
- uses: xt0rted/pull-request-comment-branch@v2
+ uses: xt0rted/pull-request-comment-branch@v3
id: refs
- name: Create comment
From 56a3916a17a3b5957b5034fa1bccc9f836ee74ed Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Thu, 28 Nov 2024 10:25:41 +0100
Subject: [PATCH 171/836] perf(restore): add informers and local webserver
cache (#6147)
Signed-off-by: Armando Ruocco
Signed-off-by: Leonardo Cecchi
Co-authored-by: Leonardo Cecchi
---
internal/cmd/manager/instance/restore/cmd.go | 147 +++++++++++-------
internal/cmd/manager/instance/restore/doc.go | 18 +++
.../cmd/manager/instance/restore/restore.go | 105 +++++++++++++
internal/cmd/manager/walrestore/cmd.go | 10 +-
internal/cnpi/plugin/client/contracts.go | 5 +-
internal/cnpi/plugin/client/wal.go | 8 +-
pkg/management/client.go | 9 +-
pkg/management/postgres/restore.go | 15 +-
8 files changed, 240 insertions(+), 77 deletions(-)
create mode 100644 internal/cmd/manager/instance/restore/doc.go
create mode 100644 internal/cmd/manager/instance/restore/restore.go
diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go
index 26dd30a4d1..120dfef278 100644
--- a/internal/cmd/manager/instance/restore/cmd.go
+++ b/internal/cmd/manager/instance/restore/cmd.go
@@ -14,7 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package restore implements the "instance restore" subcommand of the operator
package restore
import (
@@ -22,16 +21,22 @@ import (
"errors"
"os"
- barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
- "github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
- ctrl "sigs.k8s.io/controller-runtime/pkg/client"
-
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ controllerruntime "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/istio"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
)
// NewCmd creates the "restore" subcommand
@@ -44,24 +49,62 @@ func NewCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "restore [flags]",
SilenceErrors: true,
- PreRunE: func(cmd *cobra.Command, _ []string) error {
- return management.WaitForGetCluster(cmd.Context(), ctrl.ObjectKey{
- Name: clusterName,
- Namespace: namespace,
- })
- },
RunE: func(cmd *cobra.Command, _ []string) error {
- ctx := cmd.Context()
+ contextLogger := log.FromContext(cmd.Context())
+
+ // This context is canceled by the restore runnable to stop
+ // the manager once the restore has completed
+ ctx, cancel := context.WithCancel(cmd.Context())
+ defer cancel()
+
+ // Step 1: build the manager
+ mgr, err := buildManager(clusterName, namespace)
+ if err != nil {
+ contextLogger.Error(err, "while building the manager")
+ return err
+ }
+
+ // Step 1.1: add the local webserver to the manager
+ localSrv, err := webserver.NewLocalWebServer(
+ postgres.NewInstance().WithClusterName(clusterName).WithNamespace(namespace),
+ mgr.GetClient(),
+ mgr.GetEventRecorderFor("local-webserver"),
+ )
+ if err != nil {
+ return err
+ }
+ if err = mgr.Add(localSrv); err != nil {
+ contextLogger.Error(err, "unable to add local webserver runnable")
+ return err
+ }
+
+ // Step 2: add the restore process to the manager
+ restoreProcess := restoreRunnable{
+ cli: mgr.GetClient(),
+ clusterName: clusterName,
+ namespace: namespace,
+ pgData: pgData,
+ pgWal: pgWal,
+ cancel: cancel,
+ }
+ if mgr.Add(&restoreProcess) != nil {
+ contextLogger.Error(err, "while building the restore process")
+ return err
+ }
+
+ // Step 3: start everything
+ if err := mgr.Start(ctx); err != nil {
+ contextLogger.Error(err, "restore error")
+ return err
+ }
- info := postgres.InitInfo{
- ClusterName: clusterName,
- Namespace: namespace,
- PgData: pgData,
- PgWal: pgWal,
+ if !errors.Is(ctx.Err(), context.Canceled) {
+ contextLogger.Error(err, "error while recovering backup")
+ return err
}
- return restoreSubCommand(ctx, info)
+ return nil
},
+
PostRunE: func(cmd *cobra.Command, _ []string) error {
if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil {
return err
@@ -81,42 +124,32 @@ func NewCmd() *cobra.Command {
return cmd
}
-func restoreSubCommand(ctx context.Context, info postgres.InitInfo) error {
- contextLogger := log.FromContext(ctx)
- err := info.CheckTargetDataDirectory(ctx)
- if err != nil {
- return err
- }
-
- err = info.Restore(ctx)
- if err != nil {
- contextLogger.Error(err, "Error while restoring a backup")
- cleanupDataDirectoryIfNeeded(ctx, err, info.PgData)
- return err
- }
-
- contextLogger.Info("restore command execution completed without errors")
-
- return nil
-}
-
-func cleanupDataDirectoryIfNeeded(ctx context.Context, restoreError error, dataDirectory string) {
- contextLogger := log.FromContext(ctx)
-
- var barmanError *barmanCommand.CloudRestoreError
- if !errors.As(restoreError, &barmanError) {
- return
- }
-
- if !barmanError.IsRetriable() {
- return
- }
-
- contextLogger.Info("Cleaning up data directory", "directory", dataDirectory)
- if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) {
- contextLogger.Error(
- err,
- "error occurred cleaning up data directory",
- "directory", dataDirectory)
- }
+func buildManager(clusterName string, namespace string) (manager.Manager, error) {
+ return controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{
+ Scheme: scheme.BuildWithAllKnownScheme(),
+ Cache: cache.Options{
+ ByObject: map[client.Object]cache.ByObject{
+ &apiv1.Cluster{}: {
+ Field: fields.OneTermEqualSelector("metadata.name", clusterName),
+ Namespaces: map[string]cache.Config{
+ namespace: {},
+ },
+ },
+ },
+ },
+ Client: client.Options{
+ Cache: &client.CacheOptions{
+ DisableFor: []client.Object{
+ &corev1.Secret{},
+ &corev1.ConfigMap{},
+ // todo(armru): we should remove the backup endpoints from the local webserver
+ &apiv1.Backup{},
+ },
+ },
+ },
+ LeaderElection: false,
+ Metrics: metricsserver.Options{
+ BindAddress: "0",
+ },
+ })
}
diff --git a/internal/cmd/manager/instance/restore/doc.go b/internal/cmd/manager/instance/restore/doc.go
new file mode 100644
index 0000000000..edb70590d7
--- /dev/null
+++ b/internal/cmd/manager/instance/restore/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package restore implements the "instance restore" subcommand of the operator
+package restore
diff --git a/internal/cmd/manager/instance/restore/restore.go b/internal/cmd/manager/instance/restore/restore.go
new file mode 100644
index 0000000000..7c05ea097b
--- /dev/null
+++ b/internal/cmd/manager/instance/restore/restore.go
@@ -0,0 +1,105 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+
+ barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
+ "github.com/cloudnative-pg/machinery/pkg/fileutils"
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+)
+
+type restoreRunnable struct {
+ cli client.Client
+ clusterName string
+ namespace string
+ pgData string
+ pgWal string
+ cancel context.CancelFunc
+}
+
+func (r *restoreRunnable) Start(ctx context.Context) error {
+ // Wait for the manager's cache and informers to be online before proceeding
+ if err := management.WaitForGetClusterWithClient(ctx, r.cli, client.ObjectKey{
+ Name: r.clusterName,
+ Namespace: r.namespace,
+ }); err != nil {
+ return fmt.Errorf("while waiting for API server connectivity: %w", err)
+ }
+
+ info := postgres.InitInfo{
+ ClusterName: r.clusterName,
+ Namespace: r.namespace,
+ PgData: r.pgData,
+ PgWal: r.pgWal,
+ }
+
+ if err := restoreSubCommand(ctx, info, r.cli); err != nil {
+ return fmt.Errorf("while restoring cluster: %s", err)
+ }
+
+ // the backup was restored correctly and we now ask
+ // the manager to quit
+ r.cancel()
+ return nil
+}
+
+func restoreSubCommand(ctx context.Context, info postgres.InitInfo, cli client.Client) error {
+ contextLogger := log.FromContext(ctx)
+ if err := info.CheckTargetDataDirectory(ctx); err != nil {
+ return err
+ }
+
+ if err := info.Restore(ctx, cli); err != nil {
+ contextLogger.Error(err, "Error while restoring a backup")
+ cleanupDataDirectoryIfNeeded(ctx, err, info.PgData)
+ return err
+ }
+
+ contextLogger.Info("restore command execution completed without errors")
+
+ return nil
+}
+
+func cleanupDataDirectoryIfNeeded(ctx context.Context, restoreError error, dataDirectory string) {
+ contextLogger := log.FromContext(ctx)
+
+ var barmanError *barmanCommand.CloudRestoreError
+ if !errors.As(restoreError, &barmanError) {
+ return
+ }
+
+ if !barmanError.IsRetriable() {
+ return
+ }
+
+ contextLogger.Info("Cleaning up data directory", "directory", dataDirectory)
+ if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) {
+ contextLogger.Error(
+ err,
+ "error occurred cleaning up data directory",
+ "directory", dataDirectory)
+ }
+}
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index 56bcdf447d..db88cb2725 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -118,9 +118,13 @@ func run(ctx context.Context, pgData string, podName string, args []string) erro
return fmt.Errorf("failed to get cluster: %w", err)
}
- if err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)); err != nil {
+ walFound, err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath))
+ if err != nil {
return err
}
+ if walFound {
+ return nil
+ }
recoverClusterName, recoverEnv, barmanConfiguration, err := GetRecoverConfiguration(cluster, podName)
if errors.Is(err, ErrNoBackupConfigured) {
@@ -244,7 +248,7 @@ func restoreWALViaPlugins(
cluster *apiv1.Cluster,
walName string,
destinationPathName string,
-) error {
+) (bool, error) {
contextLogger := log.FromContext(ctx)
plugins := repository.New()
@@ -267,7 +271,7 @@ func restoreWALViaPlugins(
)
if err != nil {
contextLogger.Error(err, "Error while loading required plugins")
- return err
+ return false, err
}
defer client.Close(ctx)
diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go
index 7ecf00960e..d136a71d08 100644
--- a/internal/cnpi/plugin/client/contracts.go
+++ b/internal/cnpi/plugin/client/contracts.go
@@ -126,13 +126,14 @@ type WalCapabilities interface {
) error
// RestoreWAL calls the loaded plugins to archive a WAL file.
- // This call is a no-op if there's no plugin implementing WAL archiving
+ // This call returns a boolean indicating whether the WAL was
+ // restored by a plugin, and any error that occurred.
RestoreWAL(
ctx context.Context,
cluster client.Object,
sourceWALName string,
destinationFileName string,
- ) error
+ ) (bool, error)
}
// BackupCapabilities describes a set of behaviour needed to backup
diff --git a/internal/cnpi/plugin/client/wal.go b/internal/cnpi/plugin/client/wal.go
index c4e1bbcede..81e0234848 100644
--- a/internal/cnpi/plugin/client/wal.go
+++ b/internal/cnpi/plugin/client/wal.go
@@ -76,14 +76,14 @@ func (data *data) RestoreWAL(
cluster client.Object,
sourceWALName string,
destinationFileName string,
-) error {
+) (bool, error) {
var errorCollector error
contextLogger := log.FromContext(ctx)
serializedCluster, err := json.Marshal(cluster)
if err != nil {
- return fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ return false, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
cluster.GetObjectKind().GroupVersionKind().Kind,
cluster.GetNamespace(), cluster.GetName(),
err,
@@ -114,9 +114,9 @@ func (data *data) RestoreWAL(
pluginLogger.Trace("WAL restore via plugin failed, trying next one", "err", err)
errorCollector = multierr.Append(errorCollector, err)
} else {
- return nil
+ return true, nil
}
}
- return errorCollector
+ return false, errorCollector
}
diff --git a/pkg/management/client.go b/pkg/management/client.go
index cd35e60485..95105530f6 100644
--- a/pkg/management/client.go
+++ b/pkg/management/client.go
@@ -140,7 +140,14 @@ func WaitForGetCluster(ctx context.Context, clusterObjectKey client.ObjectKey) e
return err
}
- err = retry.OnError(readinessCheckRetry, resources.RetryAlways, func() error {
+ return WaitForGetClusterWithClient(ctx, cli, clusterObjectKey)
+}
+
+// WaitForGetClusterWithClient will wait for a successful get cluster to be executed
+func WaitForGetClusterWithClient(ctx context.Context, cli client.Client, clusterObjectKey client.ObjectKey) error {
+ logger := log.FromContext(ctx).WithName("wait-for-get-cluster")
+
+ err := retry.OnError(readinessCheckRetry, resources.RetryAlways, func() error {
if err := cli.Get(ctx, clusterObjectKey, &apiv1.Cluster{}); err != nil {
logger.Warning("Encountered an error while executing get cluster. Will wait and retry", "error", err.Error())
return err
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index c1652107c8..2cdad8c8ea 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -53,7 +53,6 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/external"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
@@ -233,14 +232,10 @@ func (info InitInfo) createBackupObjectForSnapshotRestore(
}
// Restore restores a PostgreSQL cluster from a backup into the object storage
-func (info InitInfo) Restore(ctx context.Context) error {
+func (info InitInfo) Restore(ctx context.Context, cli client.Client) error {
contextLogger := log.FromContext(ctx)
- typedClient, err := management.NewControllerRuntimeClient()
- if err != nil {
- return err
- }
- cluster, err := info.loadCluster(ctx, typedClient)
+ cluster, err := info.loadCluster(ctx, cli)
if err != nil {
return err
}
@@ -284,13 +279,13 @@ func (info InitInfo) Restore(ctx context.Context) error {
} else {
// Before starting the restore we check if the archive destination is safe to use
// otherwise, we stop creating the cluster
- err = info.checkBackupDestination(ctx, typedClient, cluster)
+ err = info.checkBackupDestination(ctx, cli, cluster)
if err != nil {
return err
}
// If we need to download data from a backup, we do it
- backup, env, err := info.loadBackup(ctx, typedClient, cluster)
+ backup, env, err := info.loadBackup(ctx, cli, cluster)
if err != nil {
return err
}
@@ -332,7 +327,7 @@ func (info InitInfo) Restore(ctx context.Context) error {
}
connectionString, err := external.ConfigureConnectionToServer(
- ctx, typedClient, info.Namespace, &server)
+ ctx, cli, info.Namespace, &server)
if err != nil {
return err
}
From e8320138d5338f75ac146f56c41b0b5cbf58daec Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 28 Nov 2024 13:26:46 +0100
Subject: [PATCH 172/836] fix(linter): prepare code for golangci-lint 1.62.2
(#6198)
The new linter found issues that should be fixed before merging
the new version into the checks.
One of the main changes comes from ginkgo-linter, which now recommends
using Succeed() instead of BeNil().
Signed-off-by: Jonathan Gonzalez V.
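
For illustration, the matcher change recommended by ginkgo-linter looks like
this (a minimal sketch; the polled function and timeouts are placeholders):

	// Before: BeNil() asserts the returned error is nil
	Eventually(func() error {
		return env.Client.Get(env.Ctx, key, &cluster)
	}, 60, 5).Should(BeNil())

	// After: Succeed() states the intent directly: the polled
	// function must complete without an error
	Eventually(func() error {
		return env.Client.Get(env.Ctx, key, &cluster)
	}, 60, 5).Should(Succeed())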
---
tests/e2e/asserts_test.go | 12 ++++++------
tests/e2e/certificates_test.go | 6 +++---
tests/e2e/logs_test.go | 4 ++--
tests/e2e/managed_services_test.go | 6 +++---
tests/e2e/replica_mode_cluster_test.go | 4 ++--
tests/e2e/rolling_update_test.go | 4 ++--
tests/e2e/storage_expansion_test.go | 2 +-
tests/e2e/syncreplicas_test.go | 8 ++++----
tests/utils/backup.go | 2 +-
tests/utils/environment.go | 2 +-
tests/utils/pod.go | 7 ++-----
11 files changed, 27 insertions(+), 30 deletions(-)
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 37c0c0276d..5bdf763c9a 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -916,7 +916,7 @@ func AssertReplicaModeCluster(
Eventually(func() error {
primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName)
return err
- }, 30, 3).Should(BeNil())
+ }, 30, 3).Should(Succeed())
AssertPgRecoveryMode(primaryReplicaCluster, true)
})
@@ -1704,7 +1704,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) {
return err
}
return nil
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
scheduledBackupNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: scheduledBackupName,
@@ -1752,7 +1752,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) {
return err
}
return nil
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
scheduledBackupNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: scheduledBackupName,
@@ -2203,7 +2203,7 @@ func OnlineResizePVC(namespace, clusterName string) {
Eventually(func() error {
_, _, err := testsUtils.RunUnchecked(cmd)
return err
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
}
})
By("verifying Cluster storage is expanded", func() {
@@ -2259,7 +2259,7 @@ func OfflineResizePVC(namespace, clusterName string, timeout int) {
Eventually(func() error {
_, _, err := testsUtils.RunUnchecked(cmd)
return err
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
}
})
By("deleting Pod and PVCs, first replicas then the primary", func() {
@@ -2484,7 +2484,7 @@ func CreateResourcesFromFileWithError(namespace, sampleFilePath string) error {
func CreateResourceFromFile(namespace, sampleFilePath string) {
Eventually(func() error {
return CreateResourcesFromFileWithError(namespace, sampleFilePath)
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
}
// GetYAMLContent opens a .yaml of .template file and returns its content
diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go
index 5c419acb8a..5cd0f173d7 100644
--- a/tests/e2e/certificates_test.go
+++ b/tests/e2e/certificates_test.go
@@ -135,7 +135,7 @@ var _ = Describe("Certificates", func() {
return err
}
return nil
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
Eventually(func() (bool, error) {
certUpdateStatus := false
@@ -176,7 +176,7 @@ var _ = Describe("Certificates", func() {
return err
}
return nil
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
Eventually(func() (bool, error) {
cluster, err := env.GetCluster(namespace, clusterName)
@@ -212,7 +212,7 @@ var _ = Describe("Certificates", func() {
return err
}
return nil
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
Eventually(func() (bool, error) {
cluster, err := env.GetCluster(namespace, clusterName)
diff --git a/tests/e2e/logs_test.go b/tests/e2e/logs_test.go
index 59a2f11337..7bd3c44259 100644
--- a/tests/e2e/logs_test.go
+++ b/tests/e2e/logs_test.go
@@ -86,7 +86,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "app", "-tAc",
errorTestQuery)
return queryError
- }, RetryTimeout, PollingTime).ShouldNot(BeNil())
+ }, RetryTimeout, PollingTime).ShouldNot(Succeed())
// Eventually the error log line will be logged
Eventually(func(g Gomega) bool {
@@ -118,7 +118,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
*primaryPod, specs.PostgresContainerName,
&commandTimeout, "psql", "-U", "postgres", "app", "-tAc", errorTestQuery)
return queryError
- }, RetryTimeout, PollingTime).ShouldNot(BeNil())
+ }, RetryTimeout, PollingTime).ShouldNot(Succeed())
// Expect the query to be eventually logged on the primary
Eventually(func() (bool, error) {
diff --git a/tests/e2e/managed_services_test.go b/tests/e2e/managed_services_test.go
index 68131aded9..ee139aa744 100644
--- a/tests/e2e/managed_services_test.go
+++ b/tests/e2e/managed_services_test.go
@@ -82,7 +82,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{}
return env.Client.Update(ctx, cluster)
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env)
Eventually(func(g Gomega) {
@@ -128,7 +128,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{}
return env.Client.Update(ctx, cluster)
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env)
@@ -189,7 +189,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.ObjectMeta.Labels["new-label"] = "new"
return env.Client.Update(ctx, cluster)
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
})
By("expecting the service to be recreated", func() {
diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go
index a22f4b98ad..97b94c9781 100644
--- a/tests/e2e/replica_mode_cluster_test.go
+++ b/tests/e2e/replica_mode_cluster_test.go
@@ -188,7 +188,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
Eventually(func() error {
clusterOnePrimary, err = env.GetClusterPrimary(namespace, clusterOneName)
return err
- }, 30, 3).Should(BeNil())
+ }, 30, 3).Should(Succeed())
AssertPgRecoveryMode(clusterOnePrimary, true)
})
@@ -206,7 +206,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
Eventually(func() error {
clusterTwoPrimary, err = env.GetClusterPrimary(namespace, clusterTwoName)
return err
- }, 30, 3).Should(BeNil())
+ }, 30, 3).Should(Succeed())
AssertPgRecoveryMode(clusterTwoPrimary, false)
})
diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go
index ec50ba5119..7b3bde3ae9 100644
--- a/tests/e2e/rolling_update_test.go
+++ b/tests/e2e/rolling_update_test.go
@@ -118,7 +118,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
cluster.Spec.ImageName = updatedImageName
return env.Client.Update(env.Ctx, cluster)
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
// All the postgres containers should have the updated image
AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, timeout)
@@ -599,7 +599,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// Wait until we really deleted it
Eventually(func() error {
return env.Client.Get(env.Ctx, ctrl.ObjectKey{Name: catalog.Name}, catalog)
- }, 30).Should(MatchError(apierrs.IsNotFound, metav1.StatusReasonNotFound))
+ }, 30).Should(MatchError(apierrs.IsNotFound, string(metav1.StatusReasonNotFound)))
})
Context("Three Instances", func() {
const (
diff --git a/tests/e2e/storage_expansion_test.go b/tests/e2e/storage_expansion_test.go
index 7b27204806..4713dde4c3 100644
--- a/tests/e2e/storage_expansion_test.go
+++ b/tests/e2e/storage_expansion_test.go
@@ -98,7 +98,7 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() {
return err
}
return nil
- }, 60, 5).Should(BeNil())
+ }, 60, 5).Should(Succeed())
})
OfflineResizePVC(namespace, clusterName, 600)
})
diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go
index 1b9f47c94b..fcd321874e 100644
--- a/tests/e2e/syncreplicas_test.go
+++ b/tests/e2e/syncreplicas_test.go
@@ -109,7 +109,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
cluster.Spec.MaxSyncReplicas = 1
return env.Client.Update(env.Ctx, cluster)
- }, RetryTimeout, 5).Should(BeNil())
+ }, RetryTimeout, 5).Should(Succeed())
// Scale the cluster down to 2 pods
_, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace,
@@ -200,7 +200,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = ptr.To(1)
cluster.Spec.PostgresConfiguration.Synchronous.Number = 1
return env.Client.Update(env.Ctx, cluster)
- }, RetryTimeout, 5).Should(BeNil())
+ }, RetryTimeout, 5).Should(Succeed())
getSyncReplicationCount(namespace, clusterName, "quorum", 1)
compareSynchronousStandbyNames(namespace, clusterName, "ANY 1")
@@ -212,7 +212,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.Synchronous.Method = apiv1.SynchronousReplicaConfigurationMethodFirst
return env.Client.Update(env.Ctx, cluster)
- }, RetryTimeout, 5).Should(BeNil())
+ }, RetryTimeout, 5).Should(Succeed())
getSyncReplicationCount(namespace, clusterName, "sync", 1)
compareSynchronousStandbyNames(namespace, clusterName, "FIRST 1")
@@ -226,7 +226,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre = []string{"preSyncReplica"}
cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPost = []string{"postSyncReplica"}
return env.Client.Update(env.Ctx, cluster)
- }, RetryTimeout, 5).Should(BeNil())
+ }, RetryTimeout, 5).Should(Succeed())
compareSynchronousStandbyNames(namespace, clusterName, "FIRST 1 (\"preSyncReplica\"")
compareSynchronousStandbyNames(namespace, clusterName, "\"postSyncReplica\")")
})
diff --git a/tests/utils/backup.go b/tests/utils/backup.go
index e07f20d2a5..9ef2aadd9c 100644
--- a/tests/utils/backup.go
+++ b/tests/utils/backup.go
@@ -46,7 +46,7 @@ func ExecuteBackup(
return fmt.Errorf("could not create backup.\nStdErr: %v\nError: %v", stderr, err)
}
return nil
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
backupNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: backupName,
diff --git a/tests/utils/environment.go b/tests/utils/environment.go
index 2596f87186..b93ea595d8 100644
--- a/tests/utils/environment.go
+++ b/tests/utils/environment.go
@@ -186,7 +186,7 @@ func (env TestingEnvironment) EventuallyExecCommand(
return err
}
return nil
- }, RetryTimeout, PollingTime).Should(BeNil())
+ }, RetryTimeout, PollingTime).Should(Succeed())
return stdOut, stdErr, err
}
diff --git a/tests/utils/pod.go b/tests/utils/pod.go
index 25841da6a1..e439d0e00f 100644
--- a/tests/utils/pod.go
+++ b/tests/utils/pod.go
@@ -266,11 +266,8 @@ func (env TestingEnvironment) EventuallyExecQueryInInstancePod(
Namespace: podLocator.Namespace,
PodName: podLocator.PodName,
}, dbname, query)
- if err != nil {
- return err
- }
- return nil
- }, retryTimeout, pollingTime).Should(BeNil())
+ return err
+ }, retryTimeout, pollingTime).Should(Succeed())
return stdOut, stdErr, err
}
From fb1554712f23f34c6fd5d3532a1898098f6e3886 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 28 Nov 2024 15:44:14 +0100
Subject: [PATCH 173/836] chore(deps): update dependency golangci/golangci-lint
to v1.62.2 (main) (#6182)
---
.github/workflows/continuous-integration.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 592525397c..db7bbb243e 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -17,7 +17,7 @@ on:
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.23.x"
- GOLANGCI_LINT_VERSION: "v1.61.0"
+ GOLANGCI_LINT_VERSION: "v1.62.2"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.25.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
From 18f1c062b4166556ddbbdfd0684a22c22fd8b822 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Fri, 29 Nov 2024 06:53:25 +0100
Subject: [PATCH 174/836] docs: clarify support for PostgreSQL 17 (#6202)
Closes #4685
Signed-off-by: Gabriele Bartolini
---
docs/src/supported_releases.md | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index c1bfbd9128..9717ee5cc0 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -79,13 +79,14 @@ Git tags for versions are prefixed with `v`.
## Support status of CloudNativePG releases
-| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
-|-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------|
-| 1.24.x | Yes | August 22, 2024 | ~ February, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 121 - 17 |
-| 1.23.x | Yes | April 24, 2024 | November 24, 2024 | 1.27, 1.28, 1.29 | 1.30, 1.31 | 121 - 17 |
-| main | No, development only | | | | | 121 - 17 |
+
+| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
+|-----------------|----------------------|---------------------|---------------------|-------------------------------|---------------------------|-----------------------------|
+| 1.25.x | Yes | December XX, 2024 | ~ February, 2025 | 1.29, 1.30, 1.31, 1.32 (??) | 1.27, 1.28 | 13 - 17 |
+| 1.24.x | Yes | August 22, 2024 | February XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
+| main | No, development only | | | | | 13 - 17 |
-1 _PostgreSQL 12 will be supported until November 14, 2024._
+
The list of supported Kubernetes versions in the table depends on what
the CloudNativePG maintainers think is reasonable to support and to test.
@@ -139,6 +140,7 @@ version of PostgreSQL, we might not be able to help you.
| Version | Release date | End of life | Compatible Kubernetes versions |
|-----------------|-------------------|---------------------|--------------------------------|
+| 1.23.x | April 24, 2024 | November 24, 2024 | 1.27, 1.28, 1.29 |
| 1.22.x | December 21, 2023 | July 24, 2024 | 1.26, 1.27, 1.28 |
| 1.21.x | October 12, 2023 | Jun 12, 2024 | 1.25, 1.26, 1.27, 1.28 |
| 1.20.x | April 27, 2023 | January 21, 2024 | 1.24, 1.25, 1.26, 1.27 |
From 8b7bd438d747336e027fb281b60ece76a86c4ee7 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 29 Nov 2024 15:21:37 +0100
Subject: [PATCH 175/836] fix(deps): update kubernetes patches (main) (#6181)
https://github.com/prometheus-operator/prometheus-operator `v0.78.1` -> `v0.78.2`
https://github.com/kubernetes/api `v0.31.2` -> `v0.31.3`
https://github.com/kubernetes/apiextensions-apiserver `v0.31.2` -> `v0.31.3`
https://github.com/kubernetes/apimachinery `v0.31.2` -> `v0.31.3`
https://github.com/kubernetes/cli-runtime `v0.31.2` -> `v0.31.3`
https://github.com/kubernetes/client-go `v0.31.2` -> `v0.31.3`
https://github.com/kubernetes-sigs/controller-runtime `v0.19.1` -> `v0.19.2`
---
go.mod | 14 +++++++-------
go.sum | 28 ++++++++++++++--------------
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/go.mod b/go.mod
index f276419c5c..eaa13af67f 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
github.com/mitchellh/go-ps v1.0.0
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.0
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
github.com/sethvargo/go-password v0.3.1
@@ -40,13 +40,13 @@ require (
golang.org/x/term v0.26.0
google.golang.org/grpc v1.68.0
gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.31.2
- k8s.io/apiextensions-apiserver v0.31.2
- k8s.io/apimachinery v0.31.2
- k8s.io/cli-runtime v0.31.2
- k8s.io/client-go v0.31.2
+ k8s.io/api v0.31.3
+ k8s.io/apiextensions-apiserver v0.31.3
+ k8s.io/apimachinery v0.31.3
+ k8s.io/cli-runtime v0.31.3
+ k8s.io/client-go v0.31.3
k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078
- sigs.k8s.io/controller-runtime v0.19.1
+ sigs.k8s.io/controller-runtime v0.19.2
sigs.k8s.io/yaml v1.4.0
)
diff --git a/go.sum b/go.sum
index 317da64330..cf4ed929c2 100644
--- a/go.sum
+++ b/go.sum
@@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 h1:Fm9Z+FabnB+6EoGq15j+pyLmaK6hYrYOpBlTzOLTQ+E=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 h1:SyoVBXD/r0PntR1rprb90ClI32FSUNOCWqqTatnipHM=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
@@ -278,24 +278,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
-k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
-k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
-k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
-k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
-k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ=
-k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q=
-k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
-k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
+k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
+k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
+k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE=
+k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4=
+k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
+k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI=
+k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8=
+k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
+k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo=
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno=
k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk=
-sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8=
+sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU=
From 28bcc2cc18736705713660e0f711d55d91a89c2c Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Fri, 29 Nov 2024 16:01:32 +0100
Subject: [PATCH 176/836] docs: update join slack link (#6217)
Closes #6218
Signed-off-by: Jonathan Gonzalez V.
---
.github/ISSUE_TEMPLATE/config.yml | 2 +-
CONTRIBUTING.md | 2 +-
README.md | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index f522006c44..17f1e503e7 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -4,5 +4,5 @@ contact_links:
url: https://github.com/cloudnative-pg/cloudnative-pg/discussions
about: Please ask and answer questions here.
- name: Slack chat
- url: https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g
+ url: https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A
about: Please join the slack channel and interact with our community
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c3a18b3d1d..e6b74b5db6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -40,7 +40,7 @@ For development contributions, please refer to the separate section called
## Ask for Help
The best way to reach us with a question when contributing is to drop a line in
-our [Slack channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g), or
+our [Slack channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A), or
start a new Github discussion.
## Raising Issues
diff --git a/README.md b/README.md
index 43b055e250..8a8e8df0d6 100644
--- a/README.md
+++ b/README.md
@@ -113,7 +113,7 @@ MariaDB cluster).
## Communications
-- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g)
+- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A)
- [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions)
- [Twitter](https://twitter.com/CloudNativePg)
From 6c4dfc933173b70a101aef5c93714439c032dd6c Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Mon, 2 Dec 2024 14:23:58 +0100
Subject: [PATCH 177/836] chore(docs): template for release notes for a new
minor (#6229)
Signed-off-by: Gabriele Bartolini
---
contribute/release-notes-template.md | 70 ++++++++++++++++++++++++++++
contribute/release_procedure.md | 2 +
2 files changed, 72 insertions(+)
create mode 100644 contribute/release-notes-template.md
diff --git a/contribute/release-notes-template.md b/contribute/release-notes-template.md
new file mode 100644
index 0000000000..53501c9492
--- /dev/null
+++ b/contribute/release-notes-template.md
@@ -0,0 +1,70 @@
+
+# Release notes for CloudNativePG 1.XX
+
+History of user-visible changes in the 1.XX minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.XX)
+on the release branch in GitHub.
+
+## Version 1.XX.0-rc1
+
+**Release date:** Mon DD, 20YY
+
+### Important changes:
+
+- OPTIONAL
+- OPTIONAL
+
+### Features:
+
+- **MAIN FEATURE #1**: short description
+- **MAIN FEATURE #2**: short description
+
+### Enhancements:
+
+- Add ...
+- Introduce ...
+- Allow ...
+- Enhance ...
+- `cnpg` plugin updates:
+  - Enhance ...
+  - Add ...
+
+### Security:
+
+- Add ...
+- Improve ...
+
+### Fixes:
+
+- Enhance ...
+- Disable ...
+- Gracefully handle ...
+- Wait ...
+- Fix ...
+- Address ...
+- `cnpg` plugin:
+  - ...
+  - ...
+
+### Supported versions
+
+- Kubernetes 1.31, 1.30, and 1.29
+- PostgreSQL 17, 16, 15, 14, and 13
+  - PostgreSQL 17.X is the default image
+  - PostgreSQL 13 support ends on November 12, 2025
diff --git a/contribute/release_procedure.md b/contribute/release_procedure.md
index 0e708302e2..15aa72df43 100644
--- a/contribute/release_procedure.md
+++ b/contribute/release_procedure.md
@@ -71,6 +71,8 @@ activities:
update [`docs/src/release_notes.md`](../docs/src/release_notes.md)
and [`.github/ISSUE_TEMPLATE/bug.yml`](../.github/ISSUE_TEMPLATE/bug.yml).
These changes should go in a PR against `main`, and get maintainer approval.
+ Look at the template file to get an idea of how to start a new minor release
+ version document.
- **Capabilities page:** in case of a new minor release, ensure that the
operator capability levels page in
From cfda74bfbc10da77a57b2ebb2685c26d77afdfd8 Mon Sep 17 00:00:00 2001
From: Timo Adler <44780691+Eykha@users.noreply.github.com>
Date: Mon, 2 Dec 2024 14:27:38 +0100
Subject: [PATCH 178/836] fix(docs): use correct value for
`cnpg.io/skipWalArchiving` (#4848)
Signed-off-by: Timo Adler <44780691+Eykha@users.noreply.github.com>
---
docs/src/labels_annotations.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md
index 74b9d247ae..8da4514584 100644
--- a/docs/src/labels_annotations.md
+++ b/docs/src/labels_annotations.md
@@ -190,7 +190,7 @@ These predefined annotations are managed by CloudNativePG.
risk.
`cnpg.io/skipWalArchiving`
-: When set to `true` on a `Cluster` resource, the operator disables WAL archiving.
+: When set to `enabled` on a `Cluster` resource, the operator disables WAL archiving.
This will set `archive_mode` to `off` and require a restart of all PostgreSQL
instances. Use at your own risk.
From fa663f2b74974352bca24c831e93735fbbc21ce8 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Mon, 2 Dec 2024 14:33:15 +0100
Subject: [PATCH 179/836] feat(import): add support for extra `pg_dump` and
`pg_restore` options (#6214)
This commit introduces two optional parameters in the
`.spec.initdb.import` stanza:
- `pgDumpExtraOptions`
- `pgRestoreExtraOptions`
These parameters allow users to specify additional options for the
underlying `pg_dump` and `pg_restore` commands, providing greater
flexibility when using the database import feature.
Additionally, the default file format has been changed from `custom`
(`-Fc`) to `directory` (`-Fd`) for improved compatibility and
performance (e.g. `--jobs` requires the directory format).
Closes #5832
Signed-off-by: Ben Healey
Signed-off-by: Gabriele Bartolini
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: Ben Healey
Co-authored-by: Jonathan Gonzalez V.
---
.wordlist-en-custom.txt | 2 +
api/v1/cluster_types.go | 14 +++++++
api/v1/zz_generated.deepcopy.go | 10 +++++
.../bases/postgresql.cnpg.io_clusters.yaml | 18 +++++++++
docs/src/cloudnative-pg.v1.md | 20 ++++++++++
docs/src/database_import.md | 39 +++++++++++++++++--
.../cluster-import-snapshot-basicauth.yaml | 4 ++
.../postgres/logicalimport/database.go | 14 ++++++-
.../postgres/logicalimport/microservice.go | 30 ++++++++++----
.../postgres/logicalimport/monolith.go | 14 ++++++-
tests/utils/import_db.go | 2 +
11 files changed, 152 insertions(+), 15 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index c72a9994a2..fc6ba07aae 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1008,6 +1008,8 @@ pgAdmin
pgBouncer
pgBouncerIntegration
pgBouncerSecrets
+pgDumpExtraOptions
+pgRestoreExtraOptions
pgSQL
pgadmin
pgaudit
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index c701345216..c841a04954 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -1566,6 +1566,20 @@ type Import struct {
// `pg_restore` are invoked, avoiding data import. Default: `false`.
// +optional
SchemaOnly bool `json:"schemaOnly,omitempty"`
+
+ // List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ // Use these options with caution and at your own risk, as the operator
+ // does not validate their content. Be aware that certain options may
+ // conflict with the operator's intended functionality or design.
+ // +optional
+ PgDumpExtraOptions []string `json:"pgDumpExtraOptions,omitempty"`
+
+ // List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ // Use these options with caution and at your own risk, as the operator
+ // does not validate their content. Be aware that certain options may
+ // conflict with the operator's intended functionality or design.
+ // +optional
+ PgRestoreExtraOptions []string `json:"pgRestoreExtraOptions,omitempty"`
}
// ImportSource describes the source for the logical snapshot
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 0c367fc416..a0b80fcd9c 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -1387,6 +1387,16 @@ func (in *Import) DeepCopyInto(out *Import) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.PgDumpExtraOptions != nil {
+ in, out := &in.PgDumpExtraOptions, &out.PgDumpExtraOptions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PgRestoreExtraOptions != nil {
+ in, out := &in.PgRestoreExtraOptions, &out.PgRestoreExtraOptions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Import.
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 53bd6f571e..eca57c182e 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -1518,6 +1518,24 @@ spec:
items:
type: string
type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
postImportApplicationSQL:
description: |-
List of SQL queries to be executed as a superuser in the application
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index c323ced382..2ef5d831f4 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -2747,6 +2747,26 @@ database right after is imported - to be used with extreme care
pg_restore are invoked, avoiding data import. Default: false.
+pgDumpExtraOptions
+[]string
+
+
+ List of custom options to pass to the pg_dump command. IMPORTANT:
+Use these options with caution and at your own risk, as the operator
+does not validate their content. Be aware that certain options may
+conflict with the operator's intended functionality or design.
+
+
+pgRestoreExtraOptions
+[]string
+
+
+ List of custom options to pass to the pg_restore command. IMPORTANT:
+Use these options with caution and at your own risk, as the operator
+does not validate their content. Be aware that certain options may
+conflict with the operator's intended functionality or design.
+
+
diff --git a/docs/src/database_import.md b/docs/src/database_import.md
index 3308b5f6f1..2fc3b4500e 100644
--- a/docs/src/database_import.md
+++ b/docs/src/database_import.md
@@ -73,7 +73,7 @@ performed in 4 steps:
- `initdb` bootstrap of the new cluster
- export of the selected database (in `initdb.import.databases`) using
- `pg_dump -Fc`
+ `pg_dump -Fd`
- import of the database using `pg_restore --no-acl --no-owner` into the
`initdb.database` (application database) owned by the `initdb.owner` user
- cleanup of the database dump file
@@ -145,7 +145,7 @@ There are a few things you need to be aware of when using the `microservice` typ
`externalCluster` during the operation
- Connection to the source database must be granted with the specified user
that needs to run `pg_dump` and read roles information (*superuser* is OK)
-- Currently, the `pg_dump -Fc` result is stored temporarily inside the `dumps`
+- Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps`
folder in the `PGDATA` volume, so there should be enough available space to
temporarily contain the dump result on the assigned node, as well as the
restored data and indexes. Once the import operation is completed, this
@@ -162,7 +162,7 @@ The operation is performed in the following steps:
- `initdb` bootstrap of the new cluster
- export and import of the selected roles
- export of the selected databases (in `initdb.import.databases`), one at a time,
- using `pg_dump -Fc`
+ using `pg_dump -Fd`
- create each of the selected databases and import data using `pg_restore`
- run `ANALYZE` on each imported database
- cleanup of the database dump files
@@ -222,7 +222,7 @@ There are a few things you need to be aware of when using the `monolith` type:
- Connection to the source database must be granted with the specified user
that needs to run `pg_dump` and retrieve roles information (*superuser* is
OK)
-- Currently, the `pg_dump -Fc` result is stored temporarily inside the `dumps`
+- Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps`
folder in the `PGDATA` volume, so there should be enough available space to
temporarily contain the dump result on the assigned node, as well as the
restored data and indexes. Once the import operation is completed, this
@@ -268,6 +268,37 @@ unnecessary writes in the checkpoint area by tuning Postgres GUCs like
`shared_buffers`, `max_wal_size`, `checkpoint_timeout` directly in the
`Cluster` configuration.
+## Customizing `pg_dump` and `pg_restore` Behavior
+
+You can customize the behavior of `pg_dump` and `pg_restore` by specifying
+additional options using the `pgDumpExtraOptions` and `pgRestoreExtraOptions`
+parameters. For instance, you can enable parallel jobs to speed up data
+import/export processes, as shown in the following example:
+
+```yaml
+  #
+  bootstrap:
+    initdb:
+      import:
+        type: microservice
+        databases:
+          - app
+        source:
+          externalCluster: cluster-example
+        pgDumpExtraOptions:
+          - '--jobs=2'
+        pgRestoreExtraOptions:
+          - '--jobs=2'
+  #
+```
+
+!!! Warning
+ Use the `pgDumpExtraOptions` and `pgRestoreExtraOptions` fields with
+ caution and at your own risk. These options are not validated or verified by
+ the operator, and some configurations may conflict with its intended
+ functionality or behavior. Always test thoroughly in a safe and controlled
+ environment before applying them in production.
+
## Online Import and Upgrades
Logical replication offers a powerful way to import any PostgreSQL database
diff --git a/docs/src/samples/cluster-import-snapshot-basicauth.yaml b/docs/src/samples/cluster-import-snapshot-basicauth.yaml
index 967f23adba..5f6cf6e76b 100644
--- a/docs/src/samples/cluster-import-snapshot-basicauth.yaml
+++ b/docs/src/samples/cluster-import-snapshot-basicauth.yaml
@@ -13,6 +13,10 @@ spec:
          - app
        source:
          externalCluster: cluster-example
+        pgDumpExtraOptions:
+          - '--jobs=2'
+        pgRestoreExtraOptions:
+          - '--jobs=2'
  storage:
    size: 1Gi
externalClusters:
diff --git a/pkg/management/postgres/logicalimport/database.go b/pkg/management/postgres/logicalimport/database.go
index 82e87ed089..e09adf3e4a 100644
--- a/pkg/management/postgres/logicalimport/database.go
+++ b/pkg/management/postgres/logicalimport/database.go
@@ -88,6 +88,7 @@ func (ds *databaseSnapshotter) exportDatabases(
ctx context.Context,
target pool.Pooler,
databases []string,
+ extraOptions []string,
) error {
contextLogger := log.FromContext(ctx)
sectionsToExport := []string{}
@@ -100,12 +101,13 @@ func (ds *databaseSnapshotter) exportDatabases(
contextLogger.Info("exporting database", "databaseName", database)
dsn := target.GetDsn(database)
options := []string{
- "-Fc",
+ "-Fd",
"-f", generateFileNameForDatabase(database),
"-d", dsn,
"-v",
}
options = append(options, sectionsToExport...)
+ options = append(options, extraOptions...)
contextLogger.Info("Running pg_dump", "cmd", pgDump,
"options", options)
@@ -123,6 +125,7 @@ func (ds *databaseSnapshotter) importDatabases(
ctx context.Context,
target pool.Pooler,
databases []string,
+ extraOptions []string,
) error {
contextLogger := log.FromContext(ctx)
@@ -156,6 +159,7 @@ func (ds *databaseSnapshotter) importDatabases(
generateFileNameForDatabase(database),
}
+ options = append(options, extraOptions...)
options = append(options, alwaysPresentOptions...)
contextLogger.Info("Running pg_restore",
@@ -179,6 +183,7 @@ func (ds *databaseSnapshotter) importDatabaseContent(
database string,
targetDatabase string,
owner string,
+ extraOptions []string,
) error {
contextLogger := log.FromContext(ctx)
@@ -204,7 +209,9 @@ func (ds *databaseSnapshotter) importDatabaseContent(
"section", section,
)
- options := []string{
+ var options []string
+
+ alwaysPresentOptions := []string{
"-U", "postgres",
"--no-owner",
"--no-privileges",
@@ -214,6 +221,9 @@ func (ds *databaseSnapshotter) importDatabaseContent(
generateFileNameForDatabase(database),
}
+ options = append(options, extraOptions...)
+ options = append(options, alwaysPresentOptions...)
+
contextLogger.Info("Running pg_restore",
"cmd", pgRestore,
"options", options)
diff --git a/pkg/management/postgres/logicalimport/microservice.go b/pkg/management/postgres/logicalimport/microservice.go
index e84a28d843..a34593e563 100644
--- a/pkg/management/postgres/logicalimport/microservice.go
+++ b/pkg/management/postgres/logicalimport/microservice.go
@@ -34,18 +34,29 @@ func Microservice(
) error {
contextLogger := log.FromContext(ctx)
ds := databaseSnapshotter{cluster: cluster}
- databases := cluster.Spec.Bootstrap.InitDB.Import.Databases
+ initDB := cluster.Spec.Bootstrap.InitDB
+ databases := initDB.Import.Databases
+
contextLogger.Info("starting microservice clone process")
if err := createDumpsDirectory(); err != nil {
return nil
}
- if err := ds.exportDatabases(ctx, origin, databases); err != nil {
+ if err := ds.exportDatabases(
+ ctx,
+ origin,
+ databases,
+ initDB.Import.PgDumpExtraOptions,
+ ); err != nil {
return err
}
- if err := ds.dropExtensionsFromDatabase(ctx, destination, cluster.Spec.Bootstrap.InitDB.Database); err != nil {
+ if err := ds.dropExtensionsFromDatabase(
+ ctx,
+ destination,
+ initDB.Database,
+ ); err != nil {
return err
}
@@ -53,8 +64,9 @@ func Microservice(
ctx,
destination,
databases[0],
- cluster.Spec.Bootstrap.InitDB.Database,
- cluster.Spec.Bootstrap.InitDB.Owner,
+ initDB.Database,
+ initDB.Owner,
+ initDB.Import.PgRestoreExtraOptions,
); err != nil {
return err
}
@@ -63,9 +75,13 @@ func Microservice(
return err
}
- if err := ds.executePostImportQueries(ctx, destination, cluster.Spec.Bootstrap.InitDB.Database); err != nil {
+ if err := ds.executePostImportQueries(
+ ctx,
+ destination,
+ initDB.Database,
+ ); err != nil {
return err
}
- return ds.analyze(ctx, destination, []string{cluster.Spec.Bootstrap.InitDB.Database})
+ return ds.analyze(ctx, destination, []string{initDB.Database})
}
diff --git a/pkg/management/postgres/logicalimport/monolith.go b/pkg/management/postgres/logicalimport/monolith.go
index f65b0260c7..c63d787e91 100644
--- a/pkg/management/postgres/logicalimport/monolith.go
+++ b/pkg/management/postgres/logicalimport/monolith.go
@@ -53,11 +53,21 @@ func Monolith(
return err
}
- if err := ds.exportDatabases(ctx, origin, databases); err != nil {
+ if err := ds.exportDatabases(
+ ctx,
+ origin,
+ databases,
+ cluster.Spec.Bootstrap.InitDB.Import.PgDumpExtraOptions,
+ ); err != nil {
return err
}
- if err := ds.importDatabases(ctx, destination, databases); err != nil {
+ if err := ds.importDatabases(
+ ctx,
+ destination,
+ databases,
+ cluster.Spec.Bootstrap.InitDB.Import.PgRestoreExtraOptions,
+ ); err != nil {
return err
}
diff --git a/tests/utils/import_db.go b/tests/utils/import_db.go
index c3c7412f8d..ccb5e62175 100644
--- a/tests/utils/import_db.go
+++ b/tests/utils/import_db.go
@@ -70,6 +70,8 @@ func ImportDatabaseMicroservice(
Source: apiv1.ImportSource{
ExternalCluster: sourceClusterName,
},
+ PgDumpExtraOptions: []string{"--jobs=2"},
+ PgRestoreExtraOptions: []string{"--jobs=2"},
PostImportApplicationSQL: []string{"SELECT 1"},
},
},
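Across `exportDatabases`, `importDatabases`, and `importDatabaseContent`, the new parameters follow one pattern: build the fixed option list first, then append the user-supplied extras, keeping the options the operator must control in place. A minimal sketch of the `pg_dump` side under these assumptions (the `dumpFile` argument is illustrative; the real code derives it via `generateFileNameForDatabase`):

```go
package logicalimport

// buildPgDumpOptions sketches how the fixed pg_dump options and the
// user-supplied pgDumpExtraOptions are combined.
func buildPgDumpOptions(dumpFile, dsn string, extraOptions []string) []string {
	options := []string{
		"-Fd", // directory format; needed by options such as --jobs
		"-f", dumpFile,
		"-d", dsn,
		"-v",
	}
	// Extras are appended verbatim: the operator does not validate
	// them, hence the warning in the documentation above.
	return append(options, extraOptions...)
}
```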
From 15252a126054d6d289e00c01e50fce48d44cb424 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Mon, 2 Dec 2024 14:44:39 +0100
Subject: [PATCH 180/836] fix(initdb): ensure `primary_slot_name` is empty on a
primary (#6219)
Although its presence is harmless, remove the `primary_slot_name`
definition from the `override.conf` file on a primary.
Closes #6199
Signed-off-by: Armando Ruocco
---
pkg/management/postgres/configuration.go | 9 +++++++--
pkg/management/postgres/initdb.go | 5 ++---
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go
index 3dedfe9da5..1cc414ea92 100644
--- a/pkg/management/postgres/configuration.go
+++ b/pkg/management/postgres/configuration.go
@@ -252,7 +252,9 @@ func UpdateReplicaConfiguration(pgData, primaryConnInfo, slotName string) (chang
}
// configurePostgresOverrideConfFile writes the content of override.conf file, including
-// replication information
+// replication information. The `primary_slot_name` parameter is generated only when the slotName parameter is not
+// empty.
+// Returns a boolean indicating whether any changes were made, and any error encountered.
func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) {
targetFile := path.Join(pgData, constants.PostgresqlOverrideConfigurationFile)
options := map[string]string{
@@ -260,10 +262,13 @@ func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string)
"/controller/manager wal-restore --log-destination %s/%s.json %%f %%p",
postgres.LogPath, postgres.LogFileName),
"recovery_target_timeline": "latest",
- "primary_slot_name": slotName,
"primary_conninfo": primaryConnInfo,
}
+ if len(slotName) > 0 {
+ options["primary_slot_name"] = slotName
+ }
+
// Ensure that override.conf file contains just the above options
changed, err = configfile.WritePostgresConfiguration(targetFile, options)
if err != nil {
diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go
index cc338d5201..e78d434dca 100644
--- a/pkg/management/postgres/initdb.go
+++ b/pkg/management/postgres/initdb.go
@@ -430,7 +430,6 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {
// Prepare the managed configuration file (override.conf)
primaryConnInfo := info.GetPrimaryConnInfo()
- slotName := cluster.GetSlotNameFromInstanceName(info.PodName)
if isImportBootstrap {
// Write a special configuration for the import phase
@@ -439,7 +438,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {
}
} else {
// Write standard replication configuration
- if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, slotName); err != nil {
+ if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil {
return fmt.Errorf("while configuring Postgres for replication: %w", err)
}
}
@@ -466,7 +465,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {
// In case of import bootstrap, we restore the standard configuration file content
if isImportBootstrap {
/// Write standard replication configuration
- if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, slotName); err != nil {
+ if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil {
return fmt.Errorf("while configuring Postgres for replication: %w", err)
}
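The fix amounts to making `primary_slot_name` optional in the generated `override.conf`: the key is written only when a slot name is supplied, and the bootstrap path now passes an empty string on the primary. A condensed sketch of the conditional map population shown in the hunk above:

```go
package postgres

// buildOverrideOptions mirrors the conditional inclusion of
// primary_slot_name: an empty slotName, as now passed while
// bootstrapping a primary, simply omits the key from override.conf.
func buildOverrideOptions(primaryConnInfo, slotName string) map[string]string {
	options := map[string]string{
		"recovery_target_timeline": "latest",
		"primary_conninfo":         primaryConnInfo,
	}
	if len(slotName) > 0 {
		options["primary_slot_name"] = slotName
	}
	return options
}
```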
From 14627d56ffe0531f40bdb9f508356855e9706f0b Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Mon, 2 Dec 2024 14:48:49 +0100
Subject: [PATCH 181/836] chore: add `lint-fix` to the Makefile commands
(#6226)
After running `make lint`, a different command is usually required to
automatically fix the reported issues; this solves the problem by
keeping everything under the same make command prefix.
Signed-off-by: Jonathan Gonzalez V.
---
Makefile | 3 +++
1 file changed, 3 insertions(+)
diff --git a/Makefile b/Makefile
index bf85b798eb..2b93fba0ea 100644
--- a/Makefile
+++ b/Makefile
@@ -233,6 +233,9 @@ vet: ## Run go vet against code.
lint: ## Run the linter.
golangci-lint run
+lint-fix: ## Run the linter with --fix.
+ golangci-lint run --fix
+
shellcheck: ## Shellcheck for the hack directory.
@{ \
set -e ;\
From 2f29cf19b36e44e60de2f866f728764d783486b2 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Mon, 2 Dec 2024 15:20:09 +0100
Subject: [PATCH 182/836] fix: add `PhaseUnrecoverable` when no PVCs exist
(#6170)
Set the cluster phase to `Unrecoverable` when all previously generated
`PersistentVolumeClaims` are no longer present.
Closes #5912
Closes #3819
Signed-off-by: Armando Ruocco
Signed-off-by: Gabriele Bartolini
Co-authored-by: Gabriele Bartolini
---
api/v1/cluster_types.go | 2 +-
internal/controller/cluster_create.go | 8 ++++++++
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index c841a04954..b81585b961 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -523,7 +523,7 @@ const (
PhaseImageCatalogError = "Cluster has incomplete or invalid image catalog"
// PhaseUnrecoverable for an unrecoverable cluster
- PhaseUnrecoverable = "Cluster is in an unrecoverable state, needs manual intervention"
+ PhaseUnrecoverable = "Cluster is unrecoverable and needs manual intervention"
// PhaseArchitectureBinaryMissing is the error phase describing a missing architecture
PhaseArchitectureBinaryMissing = "Cluster cannot execute instance online upgrade due to missing architecture binary"
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go
index acda6f82fa..2aa3a51d47 100644
--- a/internal/controller/cluster_create.go
+++ b/internal/controller/cluster_create.go
@@ -1062,6 +1062,14 @@ func (r *ClusterReconciler) createPrimaryInstance(
// reconciliation loop is started by the informers.
contextLogger.Info("refusing to create the primary instance while the latest generated serial is not zero",
"latestGeneratedNode", cluster.Status.LatestGeneratedNode)
+
+ if err := r.RegisterPhase(ctx, cluster,
+ apiv1.PhaseUnrecoverable,
+ "One or more instances were previously created, but no PersistentVolumeClaims (PVCs) exist. "+
+ "The cluster is in an unrecoverable state. To resolve this, restore the cluster from a recent backup.",
+ ); err != nil {
+ return ctrl.Result{}, fmt.Errorf("while registering the unrecoverable phase: %w", err)
+ }
return ctrl.Result{}, nil
}
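The new phase registration fires on a specific combination of facts: the cluster status proves instances were generated in the past, yet no PersistentVolumeClaims survive, so the reconciler has no local state left to rebuild a primary from. A hypothetical distillation of that guard:

```go
package controller

// shouldMarkUnrecoverable is a hypothetical sketch of the guard: a
// non-zero latest generated serial proves instances once existed,
// while a zero PVC count means no data volume is left to recover from.
func shouldMarkUnrecoverable(latestGeneratedNode, pvcCount int) bool {
	return latestGeneratedNode > 0 && pvcCount == 0
}
```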
From 72a0796382a3518279a4fc65ace30b29f7547e2c Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Mon, 2 Dec 2024 15:25:34 +0100
Subject: [PATCH 183/836] fix(plugin): add `restoreJobHookCapabilities` in the
`PluginStatus` (#6225)
This patch adds the `restoreJobHookCapabilities` field to the
`pluginStatus`, which is currently missing.
Signed-off-by: Armando Ruocco
---
.wordlist-en-custom.txt | 3 +++
api/v1/cluster_types.go | 5 +++++
api/v1/zz_generated.deepcopy.go | 5 +++++
.../crd/bases/postgresql.cnpg.io_clusters.yaml | 7 +++++++
docs/src/cloudnative-pg.v1.md | 8 ++++++++
internal/cnpi/plugin/connection/connection.go | 17 +++++++++++------
internal/cnpi/plugin/connection/metadata.go | 13 +++++++------
internal/controller/cluster_plugins.go | 1 +
8 files changed, 47 insertions(+), 12 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index fc6ba07aae..4d811ab647 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -370,6 +370,8 @@ ReplicationTLSSecret
ResizingPVC
ResourceRequirements
ResourceVersion
+RestoreJobHook
+RestoreJobHookCapabilities
RetentionPolicy
RoleBinding
RoleConfiguration
@@ -1121,6 +1123,7 @@ resizingPVC
resourceVersion
resourcerequirements
restoreAdditionalCommandArgs
+restoreJobHookCapabilities
resync
retentionPolicy
reusePVC
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index b81585b961..bb66dac8f2 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -2141,6 +2141,11 @@ type PluginStatus struct {
// +optional
BackupCapabilities []string `json:"backupCapabilities,omitempty"`
+ // RestoreJobHookCapabilities are the list of capabilities of the
+ // plugin regarding the RestoreJobHook management
+ // +optional
+ RestoreJobHookCapabilities []string `json:"restoreJobHookCapabilities,omitempty"`
+
// Status contains the status reported by the plugin through the SetStatusInCluster interface
// +optional
Status string `json:"status,omitempty"`
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index a0b80fcd9c..014362a084 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -1920,6 +1920,11 @@ func (in *PluginStatus) DeepCopyInto(out *PluginStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.RestoreJobHookCapabilities != nil {
+ in, out := &in.RestoreJobHookCapabilities, &out.RestoreJobHookCapabilities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus.
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index eca57c182e..51914f02d2 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -6024,6 +6024,13 @@ spec:
items:
type: string
type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
status:
description: Status contains the status reported by the plugin
through the SetStatusInCluster interface
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 2ef5d831f4..662b01af57 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -3637,6 +3637,14 @@ plugin regarding the WAL management
plugin regarding the Backup management
+restoreJobHookCapabilities
+[]string
+
+
+ RestoreJobHookCapabilities are the list of capabilities of the
+plugin regarding the RestoreJobHook management
+
+
status
string
diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go
index 0e9826d530..34e9574d05 100644
--- a/internal/cnpi/plugin/connection/connection.go
+++ b/internal/cnpi/plugin/connection/connection.go
@@ -264,12 +264,13 @@ func (pluginData *data) loadRestoreJobHooksCapabilities(ctx context.Context) err
// the internal metadata
func (pluginData *data) Metadata() Metadata {
result := Metadata{
- Name: pluginData.name,
- Version: pluginData.version,
- Capabilities: make([]string, len(pluginData.capabilities)),
- OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)),
- WALCapabilities: make([]string, len(pluginData.walCapabilities)),
- BackupCapabilities: make([]string, len(pluginData.backupCapabilities)),
+ Name: pluginData.name,
+ Version: pluginData.version,
+ Capabilities: make([]string, len(pluginData.capabilities)),
+ OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)),
+ WALCapabilities: make([]string, len(pluginData.walCapabilities)),
+ BackupCapabilities: make([]string, len(pluginData.backupCapabilities)),
+ RestoreJobHookCapabilities: make([]string, len(pluginData.restoreJobHooksCapabilities)),
}
for i := range pluginData.capabilities {
@@ -288,6 +289,10 @@ func (pluginData *data) Metadata() Metadata {
result.BackupCapabilities[i] = pluginData.backupCapabilities[i].String()
}
+ for i := range pluginData.restoreJobHooksCapabilities {
+ result.RestoreJobHookCapabilities[i] = pluginData.restoreJobHooksCapabilities[i].String()
+ }
+
return result
}
diff --git a/internal/cnpi/plugin/connection/metadata.go b/internal/cnpi/plugin/connection/metadata.go
index 21f28652c3..a17e4d9ae0 100644
--- a/internal/cnpi/plugin/connection/metadata.go
+++ b/internal/cnpi/plugin/connection/metadata.go
@@ -19,10 +19,11 @@ package connection
// Metadata exposes the metadata as discovered
// from a plugin
type Metadata struct {
- Name string
- Version string
- Capabilities []string
- OperatorCapabilities []string
- WALCapabilities []string
- BackupCapabilities []string
+ Name string
+ Version string
+ Capabilities []string
+ OperatorCapabilities []string
+ WALCapabilities []string
+ BackupCapabilities []string
+ RestoreJobHookCapabilities []string
}
diff --git a/internal/controller/cluster_plugins.go b/internal/controller/cluster_plugins.go
index 2b6b31f4a0..845af5e6f7 100644
--- a/internal/controller/cluster_plugins.go
+++ b/internal/controller/cluster_plugins.go
@@ -43,6 +43,7 @@ func (r *ClusterReconciler) updatePluginsStatus(ctx context.Context, cluster *ap
cluster.Status.PluginStatus[i].OperatorCapabilities = entry.OperatorCapabilities
cluster.Status.PluginStatus[i].WALCapabilities = entry.WALCapabilities
cluster.Status.PluginStatus[i].BackupCapabilities = entry.BackupCapabilities
+ cluster.Status.PluginStatus[i].RestoreJobHookCapabilities = entry.RestoreJobHookCapabilities
}
// If nothing changes, there's no need to hit the API server
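The change threads one more capability list through two layers: the connection code stringifies the discovered capability enums into `Metadata`, and the reconciler copies them into the cluster's `PluginStatus`. The conversion loops all share one shape, which a generic helper could capture; a hypothetical sketch:

```go
package connection

// stringer is a hypothetical constraint for the plugin protocol's
// capability enums, all of which expose String().
type stringer interface{ String() string }

// capabilityStrings converts a discovered capability list into the
// string slice that ends up in the cluster's PluginStatus.
func capabilityStrings[T stringer](caps []T) []string {
	result := make([]string, len(caps))
	for i := range caps {
		result[i] = caps[i].String()
	}
	return result
}
```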
From 1b924a370a65f5a9fe3b710f0f2d6f276c991dc6 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Mon, 2 Dec 2024 21:36:48 +0100
Subject: [PATCH 184/836] doc: clarify usage of endpointCA field with object
store (#5537)
Closes #5308
Signed-off-by: Jaime Silvela
---
docs/src/appendixes/object_stores.md | 37 ++++++++++++++++++++++------
docs/src/certificates.md | 4 +--
2 files changed, 31 insertions(+), 10 deletions(-)
diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md
index 991b8fa14e..2fc32452ca 100644
--- a/docs/src/appendixes/object_stores.md
+++ b/docs/src/appendixes/object_stores.md
@@ -129,6 +129,7 @@ spec:
In case you're using **Digital Ocean Spaces**, you will have to use the Path-style syntax.
In this example, it will use the `bucket` from **Digital Ocean Spaces** in the region `SFO3`.
+
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
@@ -142,10 +143,31 @@ spec:
[...]
```
-!!! Important
- Suppose you configure an Object Storage provider which uses a certificate signed with a private CA,
- like when using MinIO via HTTPS. In that case, you need to set the option `endpointCA`
- referring to a secret containing the CA bundle so that Barman can verify the certificate correctly.
+### Using Object Storage with a private CA
+
+Suppose you configure an Object Storage provider which uses a certificate
+signed with a private CA, for example when using MinIO via HTTPS. In that case,
+you need to set the option `endpointCA` inside `barmanObjectStore` referring
+to a secret containing the CA bundle, so that Barman can verify the certificate
+correctly.
+You can find instructions on creating a secret using your cert files in the
+[certificates](../certificates.md#example) document.
+Once you have created the secret, you can populate the `endpointCA` as in the
+following example:
+
+``` yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+[...]
+spec:
+  [...]
+  backup:
+    barmanObjectStore:
+      endpointURL: <myEndpointURL>
+      endpointCA:
+        name: my-ca-secret
+        key: ca.crt
+```
!!! Note
If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
@@ -186,7 +208,7 @@ On the other side, using both **Storage account access key** or **Storage accoun
the credentials need to be stored inside a Kubernetes Secret, adding data entries only when
needed. The following command performs that:
-```
+``` sh
kubectl create secret generic azure-creds \
  --from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
  --from-literal=AZURE_STORAGE_KEY=<storage account key> \
@@ -226,7 +248,7 @@ spec:
When using the Azure Blob Storage, the `destinationPath` fulfills the following
structure:
-```
+``` sh
<http|https>://<account-name>.<service-name>.core.windows.net/<resource-path>
```
@@ -238,7 +260,7 @@ which is also called **storage account name**, is included in the used host name
If you are using a different implementation of the Azure Blob Storage APIs,
the `destinationPath` will have the following structure:
-```
+``` sh
<http|https>://<local-machine-address>:<port>/<account-name>/<resource-path>
```
@@ -266,7 +288,6 @@ without having to set any credentials. In particular, you need to:
Please use the following example as a reference:
-
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
diff --git a/docs/src/certificates.md b/docs/src/certificates.md
index b5e2d1f49f..c9cc2eb95c 100644
--- a/docs/src/certificates.md
+++ b/docs/src/certificates.md
@@ -129,14 +129,14 @@ Given the following files:
Create a secret containing the CA certificate:
-```
+``` sh
kubectl create secret generic my-postgresql-server-ca \
--from-file=ca.crt=./server-ca.crt
```
Create a secret with the TLS certificate:
-```
+``` sh
kubectl create secret tls my-postgresql-server \
--cert=./server.crt --key=./server.key
```
From 255262a795254e4e1aaca6f0a2a1b28293e8913f Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Tue, 3 Dec 2024 09:29:32 +0100
Subject: [PATCH 185/836] chore: remove unknown fields and improve CSV specs
(#6107)
Some fields were not required and/or needed, and others could be
displayed better on the OpenShift web interface.
Closes #5966
Signed-off-by: Jonathan Gonzalez V.
---
.spellcheck.yaml | 3 +
.wordlist-en-custom.txt | 30 ++++
api/v1/cluster_types.go | 1 -
.../bases/postgresql.cnpg.io_clusters.yaml | 13 +-
.../cloudnative-pg.clusterserviceversion.yaml | 140 +++++++++++++++---
5 files changed, 160 insertions(+), 27 deletions(-)
diff --git a/.spellcheck.yaml b/.spellcheck.yaml
index 540285ffcd..d60d3bf957 100644
--- a/.spellcheck.yaml
+++ b/.spellcheck.yaml
@@ -3,6 +3,7 @@ matrix:
sources:
- 'docs/src/*.md'
- 'docs/src/*/*.md'
+ - 'config/olm-manifests/bases/*.yaml'
default_encoding: utf-8
aspell:
lang: en
@@ -25,4 +26,6 @@ matrix:
close: '(?P=open)'
- open: '(?P)'
+ - open: '.*base64data.*'
+ close: "$"
- pyspelling.filters.url:
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 4d811ab647..912dc3d759 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1,3 +1,4 @@
+
AES
API's
APIs
@@ -9,6 +10,7 @@ AdditionalCommandArgs
AdditionalPodAffinity
AdditionalPodAntiAffinity
AffinityConfiguration
+AllNamespaces
AntiAffinity
AppArmor
AppArmorProfile
@@ -244,6 +246,7 @@ Milsted
MinIO
Minikube
MonitoringConfiguration
+MultiNamespace
NFS
NGINX
NOBYPASSRLS
@@ -276,6 +279,7 @@ Openshift
OperatorCapabilities
OperatorGroup
OperatorHub
+OwnNamespace
PDB
PDBs
PGAudit
@@ -403,6 +407,7 @@ ScheduledBackupSpec
ScheduledBackupStatus
ScheduledBackups
Scorsolini
+Seccomp
SeccompProfile
SecretKeySelector
SecretRefs
@@ -424,6 +429,7 @@ ServiceUpdateStrategy
SetStatusInCluster
ShutdownCheckpointToken
Silvela
+SingleNamespace
Slonik
SnapshotOwnerReference
SnapshotType
@@ -438,6 +444,7 @@ SubscriptionReclaimPolicy
SubscriptionSpec
SubscriptionStatus
SuccessfullyExtracted
+SuperUserSecret
SwitchReplicaClusterStatus
SyncReplicaElectionConstraints
SynchronizeReplicas
@@ -513,6 +520,7 @@ allocator
allowConnections
allowPrivilegeEscalation
allowVolumeExpansion
+alm
amd
angus
anonymization
@@ -524,6 +532,7 @@ apidoc
apimachinery
apis
apiserver
+apiservicedefinitions
apparmor
appdb
applicationCredentials
@@ -586,6 +595,7 @@ bindPassword
bindSearchAuth
bitmask
bool
+booleanSwitch
bootstrapconfiguration
bootstrapinitdb
bootstraprecovery
@@ -653,6 +663,7 @@ connectionLimit
connectionParameters
connectionString
conninfo
+containerImage
containerPort
controldata
coredump
@@ -665,6 +676,7 @@ cpu
crc
crds
crdview
+createdAt
createdb
createrole
createuser
@@ -684,6 +696,7 @@ currentPrimaryTimestamp
customQueriesConfigMap
customQueriesSecret
customizable
+customresourcedefinitions
cutover
cyber
dT
@@ -716,6 +729,8 @@ dir
disableDefaultQueries
disablePassword
disabledDefaultServices
+displayName
+displayName
distro
distroless
distros
@@ -773,6 +788,7 @@ filesystem
finalizer
findstr
fio
+fips
firstRecoverabilityPoint
firstRecoverabilityPointByMethod
freddie
@@ -781,6 +797,7 @@ gapped
gc
gcc
gce
+gcp
gcs
gcsCredentials
geocoder
@@ -837,6 +854,7 @@ initdb
initialise
initializingPVC
inplace
+installModes
installplans
instanceID
instanceName
@@ -925,6 +943,7 @@ maxSyncReplicas
maxwait
mcache
md
+mediatype
mem
memstats
metav
@@ -934,6 +953,7 @@ microservice
microservices
microsoft
minApplyDelay
+minKubeVersion
minSyncReplicas
minikube
minio
@@ -1034,10 +1054,12 @@ png
podAffinityTerm
podAntiAffinity
podAntiAffinityType
+podCount
podMetricsEndpoints
podMonitorMetricRelabelings
podMonitorRelabelings
podName
+podStatuses
podmonitor
podtemplates
poolMode
@@ -1120,6 +1142,7 @@ req
requiredDuringSchedulingIgnoredDuringExecution
resizeInUseVolumes
resizingPVC
+resourceRequirements
resourceVersion
resourcerequirements
restoreAdditionalCommandArgs
@@ -1184,6 +1207,7 @@ shutdownCheckpointToken
sig
sigs
singlenamespace
+skipRange
slotPrefix
smartShutdownTimeout
snapshotBackupStatus
@@ -1191,6 +1215,7 @@ snapshotOwnerReference
snapshotted
snapshotting
sourceNamespace
+specDescriptors
specificities
sql
src
@@ -1210,6 +1235,7 @@ standbyNamesPre
startDelay
startedAt
stateful
+statusDescriptors
stderr
stdout
stedolan
@@ -1236,6 +1262,7 @@ superuserSecret
superuserSecretVersion
sv
svc
+svg
switchReplicaClusterStatus
switchoverDelay
switchovers
@@ -1288,6 +1315,7 @@ transactional
transactionid
tx
ubi
+ui
uid
ul
un
@@ -1305,6 +1333,7 @@ updateStrategy
upgradable
uptime
uri
+url
usename
usernamepassword
usr
@@ -1341,5 +1370,6 @@ wsl
www
xact
xlog
+xml
yaml
yml
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index bb66dac8f2..9bb1edef5c 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -2073,7 +2073,6 @@ type ManagedServices struct {
type ManagedService struct {
// SelectorType specifies the type of selectors that the service will have.
// Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
- // +kubebuilder:validation:Enum=rw;r;ro
SelectorType ServiceSelectorType `json:"selectorType"`
// UpdateStrategy describes how the service differences should be reconciled
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 51914f02d2..4316b14329 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -3232,18 +3232,13 @@ spec:
It includes the type of service and its associated template specification.
properties:
selectorType:
- allOf:
- - enum:
- - rw
- - r
- - ro
- - enum:
- - rw
- - r
- - ro
description: |-
SelectorType specifies the type of selectors that the service will have.
Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
type: string
serviceTemplate:
description: ServiceTemplate is the template specification
diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
index 0bf3485944..a8d0c40f05 100644
--- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
+++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
@@ -143,7 +143,7 @@ spec:
- 'urn:alm:descriptor:io.kubernetes:Clusters'
statusDescriptors:
- displayName: Phase
- description: Current backupphase
+ description: Current backup phase
path: phase
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes.phase'
@@ -174,6 +174,18 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:text'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Image section
+ - path: imagePullSecrets
+ displayName: Image Pull Secret
+ description: List of secrets to use for pulling the images
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: imagePullSecrets[0].name
+ displayName: Image Pull Secret
+ description: Secret for pulling the image. If empty, no secret will be used
+ x-descriptors:
+ - 'urn:alm:descriptor:io.kubernetes:Secret'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
- path: imageName
displayName: Image Name
description: Name of the PostgreSQL container image
@@ -198,6 +210,13 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:hidden'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: imageCatalogRef
+ displayName: Image Catalog
+ description: The name of the image catalog to use
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Instances section
- path: instances
displayName: Instances
description: Number of instances required in the cluster
@@ -215,6 +234,34 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:number'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: enablePDB
+ displayName: Enable Pod Disruption Budget
+ description: Boolean to enable or disable the Pod Disruption Budget
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: priorityClassName
+ displayName: Priority Class Name
+ description: The name of the Priority Class to use in every generated Pod
+ x-descriptors:
+ - 'urn:alm:descriptor:io.kubernetes:PriorityClass'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: env
+ displayName: Environment Variables
+ description: Environment variables to set in the pods created in the cluster
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: envFrom
+ displayName: Environment Variables from ConfigMap
+ description: ConfigMap to use as environment variables in the pods created in the cluster
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: seccompProfile
+ displayName: Seccomp Profile
+ description: The seccomp profile applied to every pod in the cluster
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # PostgreSQL Configuration section
- path: postgresql
displayName: PostgreSQL Configuration
description: Options for postgresql.conf
@@ -226,11 +273,8 @@ spec:
- path: postgresql.pg_hba[0]
displayName: pg_hba rules
description: PostgreSQL Host Based Authentication rules
- - path: postgresql.epas.audit
- displayName: EPAS Configuration
- description: Boolean to enable edb_audit logging
- path: postgresql.promotionTimeout
- displayName: pgctl Timeout
+ displayName: pg_ctl Timeout
description: maximum number of seconds to wait when promoting an instance to primary
- path: postgresql.shared_preload_libraries[0]
displayName: Preload Libraries
@@ -259,6 +303,22 @@ spec:
description: Boolean to enable TLS
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
+ - path: tablespaces
+ displayName: Tablespaces
+ description: Configuration of the tablespaces
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: managed
+ displayName: Managed Resources
+ description: Resources managed by the operator
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: replicationSlots
+ displayName: Replication Slots Configuration
+ description: Configuration of the replication slots
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Bootstrap section
- path: bootstrap
displayName: Bootstrap Configuration
description: Instructions to bootstrap the cluster
@@ -269,12 +329,14 @@ spec:
description: The name of the Bootstrap secret
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:Secret'
+ # Replica cluster section
- path: replica
displayName: Replica
description: Replica cluster configuration
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:Secret'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Superuser section
- path: superuserSecret
displayName: Superuser Secret
description: |
@@ -284,7 +346,7 @@ spec:
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
- path: superuserSecret.name
displayName: Superuser Secret
- description: Name of hte Superuser Secret
+ description: Name of the Superuser Secret
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:Secret'
- path: enableSuperuserAccess
@@ -293,16 +355,13 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Certificates section
- path: certificates
displayName: Certificates
description: The configuration for the CA and related certificates
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
- - path: imagePullSecrets[0].name
- displayName: Image Pull Secret
- description: Secret for pulling the image. If empty, no secret will be used
- x-descriptors:
- - 'urn:alm:descriptor:io.kubernetes:Secret'
+ # Storage section
- path: storage
displayName: Storage
description: Configuration of the storage of the instances
@@ -336,6 +395,7 @@ spec:
path: storage.pvcTemplate
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Delay and timeout section
- path: startDelay
displayName: Maximum Start Delay
description: The time in seconds that is allowed for a PostgreSQL instance
@@ -350,6 +410,33 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:number'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: smartShutdownTimeout
+ displayName: Smart Shutdown Timeout
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:number'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: switchoverDelay
+ displayName: Switchover Delay
+ description: The time in seconds that is allowed for a PostgreSQL instance
+ to gracefully shutdown during a switchover
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:number'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: failoverDelay
+ displayName: Failover Delay
+ description: The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected to be unhealthy
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:number'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: livenessProbeTimeout
+ displayName: Liveness Probe Timeout
+ description: The time in seconds that is allowed for the liveness probe to
+ complete
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:number'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Affinity section
- path: affinity
displayName: Pod Affinity
description: Affinity/Anti-affinity rules for Pods
@@ -365,11 +452,13 @@ spec:
description: Key value pair of which nodes the pods can run
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Node'
+ # Resources section
- path: resources
- display: Resources
- description:
+ displayName: PostgreSQL Resources requirement
+ description: Resources requirement for the PostgreSQL instances
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements'
+ # Update strategy section
- path: primaryUpdateStrategy
displayName: Primary Update Strategy
x-descriptors:
@@ -378,6 +467,10 @@ spec:
displayName: Primary Update Method
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Backup section
+ - path: backup
+ displayName: Backup Configuration
+ description: Configuration to be used for backups
- path: backup.barmanObjectStore.endpointURL
displayName: Object Storage Endpoint
description: S3-compatible object storage Endpoint. If empty the S3 default is used
@@ -410,7 +503,7 @@ spec:
- 'urn:alm:descriptor:io.kubernetes:text'
- path: backup.barmanObjectStore.wal.encryption
displayName: WAL encryption
- description: WAL encryprion algorithm
+ description: WAL encryption algorithm
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:text'
- path: backup.barmanObjectStore.data.compression
@@ -420,7 +513,7 @@ spec:
- 'urn:alm:descriptor:io.kubernetes:text'
- path: backup.barmanObjectStore.data.encryption
displayName: Data encryption
- description: Data encryprion algorithm
+ description: Data encryption algorithm
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:text'
- path: backup.barmanObjectStore.data.immediateCheckpoint
@@ -431,6 +524,10 @@ spec:
displayName: Jobs
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:number'
+ # Maintenance Window section
+ - path: nodeMaintenanceWindow
+ displayName: Node Maintenance Window
+ description: The configuration of the maintenance window for Kubernetes nodes
- path: nodeMaintenanceWindow.inProgress
displayName: In Progress
description: Maintenance window for Kubernetes node upgrades is in progress
@@ -442,6 +539,7 @@ spec:
description: Should the existing PVCs be reused during Kubernetes upgrades?
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
+ # Monitoring section
- path: monitoring
displayName: Monitoring
description: The configuration of the monitoring infrastructure of this cluster
@@ -467,17 +565,25 @@ spec:
displayName: Enable PodMonitor resource
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
+ # External Clusters section
- path: externalClusters
displayName: External Clusters
description: List of external clusters which are used in the configuration
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:text'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Log Level section
- path: logLevel
displayName: Log Level
description: One of error, info (default), debug or trace
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Plugins section
+ - path: plugins
+ displayName: Plugins
+ description: List of plugins to be installed
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
statusDescriptors:
- displayName: Working Pods
description: Status Pods
@@ -551,7 +657,7 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:text'
- path: type
- description: Service type of the cluster to connect to ('rw' or 'rw')
+ description: Service type of the cluster to connect to ('rw' or 'ro')
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:text'
- path: instances
@@ -570,7 +676,7 @@ spec:
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
- path: pgbouncer.poolMode
displayName: PgBouncer PoolMode
- description: The poolmode to use. One of 'session' or 'transaction'.
+ description: The pool mode to use. One of 'session' or 'transaction'.
x-descriptors:
- 'urn:alm:descriptor:io.kubernetes:text'
- path: pgbouncer.authQuerySecret
From 248276cca2d113417c6db6a53d71af29ffe523ad Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Tue, 3 Dec 2024 11:04:59 +0100
Subject: [PATCH 186/836] fix(docs): add default rule for PgBouncer in `pg_hba`
(#6175)
Closes #6174
---
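The documented rule is what lets a `Pooler`'s PgBouncer reach the cluster over TLS with
certificate authentication as the operator-managed `cnpg_pooler_pgbouncer` role. As a
minimal sketch (resource and cluster names are illustrative), a pooler relying on this
default rule looks like:
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example  # must reference an existing Cluster
  instances: 1
  type: rw
  pgbouncer:
    poolMode: session
```
No extra `pg_hba` configuration is required on the user's side: the operator injects the
default rules, including the `cnpg_pooler_pgbouncer` entry documented below.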
docs/src/postgresql_conf.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md
index b3e76b6c74..fdd3fc4fe3 100644
--- a/docs/src/postgresql_conf.md
+++ b/docs/src/postgresql_conf.md
@@ -331,6 +331,7 @@ local all all peer
hostssl postgres streaming_replica all cert
hostssl replication streaming_replica all cert
+hostssl all cnpg_pooler_pgbouncer all cert
```
Default rules:
From af56bb29ef230a59bd3ef90d8be34e5af161a466 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Tue, 3 Dec 2024 13:41:59 +0100
Subject: [PATCH 187/836] chore: make cnpg plugin commands compatible with OLM
(#6213)
Improve the plugin so that the fio, publication, subscription, and
psql commands run smoothly in OLM environments.
Closes #5824
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
cmd/kubectl-cnpg/main.go | 4 +-
internal/cmd/plugin/fio/cmd.go | 2 +-
internal/cmd/plugin/fio/fio.go | 63 +++++++++++++++++----------
internal/cmd/plugin/logical/psql.go | 2 +
internal/cmd/plugin/plugin.go | 2 +-
internal/cmd/plugin/psql/psql.go | 8 +++-
internal/cmd/plugin/psql/psql_test.go | 4 ++
7 files changed, 57 insertions(+), 28 deletions(-)
diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go
index 128f6ea232..4a8157d057 100644
--- a/cmd/kubectl-cnpg/main.go
+++ b/cmd/kubectl-cnpg/main.go
@@ -66,8 +66,6 @@ func main() {
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
- plugin.ConfigureColor(cmd)
-
// If we're invoking the completion command we shouldn't try to create
// a Kubernetes client and we just let the Cobra flow to continue
if cmd.Name() == "completion" || cmd.Name() == "version" ||
@@ -75,6 +73,8 @@ func main() {
return nil
}
+ plugin.ConfigureColor(cmd)
+
return plugin.SetupKubernetesClient(configFlags)
},
}
diff --git a/internal/cmd/plugin/fio/cmd.go b/internal/cmd/plugin/fio/cmd.go
index 50d574fcba..fdb7b721d1 100644
--- a/internal/cmd/plugin/fio/cmd.go
+++ b/internal/cmd/plugin/fio/cmd.go
@@ -64,7 +64,7 @@ func NewCmd() *cobra.Command {
fmt.Printf("To remove this test you need to delete the Deployment, ConfigMap "+
"and PVC with the name %v\n\nThe most simple way to do this is to re-run the command that was run"+
"to generate the deployment with the --dry-run flag and pipe that output to kubectl delete, e.g.:\n\n"+
- "kubectl cnpg fio --dry-run | kubectl delete -f -", deploymentName)
+ "kubectl cnpg fio --dry-run | kubectl delete -f -\n", deploymentName)
}
},
}
diff --git a/internal/cmd/plugin/fio/fio.go b/internal/cmd/plugin/fio/fio.go
index c06141c690..8aa6088c67 100644
--- a/internal/cmd/plugin/fio/fio.go
+++ b/internal/cmd/plugin/fio/fio.go
@@ -29,6 +29,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
type fioCommand struct {
@@ -156,10 +157,44 @@ func (cmd *fioCommand) generateConfigMapObject() *corev1.ConfigMap {
return result
}
+func getSecurityContext() *corev1.SecurityContext {
+ runAs := int64(10001)
+ sc := &corev1.SecurityContext{
+ AllowPrivilegeEscalation: ptr.To(false),
+ RunAsNonRoot: ptr.To(true),
+ Capabilities: &corev1.Capabilities{
+ Drop: []corev1.Capability{
+ "ALL",
+ },
+ },
+ ReadOnlyRootFilesystem: ptr.To(true),
+ }
+ if utils.HaveSecurityContextConstraints() {
+ return sc
+ }
+
+ sc.RunAsUser = &runAs
+ sc.RunAsGroup = &runAs
+ sc.SeccompProfile = &corev1.SeccompProfile{
+ Type: corev1.SeccompProfileTypeRuntimeDefault,
+ }
+
+ return sc
+}
+
+func getPodSecurityContext() *corev1.PodSecurityContext {
+ if utils.HaveSecurityContextConstraints() {
+ return &corev1.PodSecurityContext{}
+ }
+ runAs := int64(10001)
+ return &corev1.PodSecurityContext{
+ FSGroup: &runAs,
+ }
+}
+
// createFioDeployment creates spec of deployment.
func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Deployment {
- runAs := int64(10001)
- fioDeployment := &appsv1.Deployment{
+ return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
@@ -229,22 +264,7 @@ func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Depl
InitialDelaySeconds: 60,
PeriodSeconds: 10,
},
- SecurityContext: &corev1.SecurityContext{
- AllowPrivilegeEscalation: ptr.To(false),
- SeccompProfile: &corev1.SeccompProfile{
- Type: corev1.SeccompProfileTypeRuntimeDefault,
- },
- RunAsGroup: &runAs,
- RunAsNonRoot: ptr.To(true),
- RunAsUser: &runAs,
-
- Capabilities: &corev1.Capabilities{
- Drop: []corev1.Capability{
- "ALL",
- },
- },
- ReadOnlyRootFilesystem: ptr.To(true),
- },
+ SecurityContext: getSecurityContext(),
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"memory": resource.MustParse("100M"),
@@ -303,13 +323,10 @@ func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Depl
},
},
},
- NodeSelector: map[string]string{},
- SecurityContext: &corev1.PodSecurityContext{
- FSGroup: &runAs,
- },
+ NodeSelector: map[string]string{},
+ SecurityContext: getPodSecurityContext(),
},
},
},
}
- return fioDeployment
}
diff --git a/internal/cmd/plugin/logical/psql.go b/internal/cmd/plugin/logical/psql.go
index 3c4b1c1670..a13d527235 100644
--- a/internal/cmd/plugin/logical/psql.go
+++ b/internal/cmd/plugin/logical/psql.go
@@ -64,6 +64,8 @@ func getSQLCommand(
) (*psql.Command, error) {
psqlArgs := []string{
connectionString,
+ "-U",
+ "postgres",
"-c",
sqlCommand,
}
diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index afaa602e1a..b9af618b84 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -98,7 +98,7 @@ func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error {
ClientInterface = kubernetes.NewForConfigOrDie(Config)
- return nil
+ return utils.DetectSecurityContextConstraints(ClientInterface.Discovery())
}
func createClient(cfg *rest.Config) error {
diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go
index 758a9f97b6..f20d5ce727 100644
--- a/internal/cmd/plugin/psql/psql.go
+++ b/internal/cmd/plugin/psql/psql.go
@@ -24,6 +24,7 @@ import (
"syscall"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/utils/strings/slices"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
@@ -102,7 +103,7 @@ func NewCommand(
// getKubectlInvocation gets the kubectl command to be executed
func (psql *Command) getKubectlInvocation() ([]string, error) {
- result := make([]string, 0, 11+len(psql.Args))
+ result := make([]string, 0, 13+len(psql.Args))
result = append(result, "kubectl", "exec")
if psql.AllocateTTY {
@@ -121,6 +122,11 @@ func (psql *Command) getKubectlInvocation() ([]string, error) {
return nil, err
}
+ // Default to `postgres` if no user has been specified
+ if !slices.Contains(psql.Args, "-U") {
+ psql.Args = append([]string{"-U", "postgres"}, psql.Args...)
+ }
+
result = append(result, podName)
result = append(result, "--", "psql")
result = append(result, psql.Args...)
diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go
index a01dd7cbb8..682705e76b 100644
--- a/internal/cmd/plugin/psql/psql_test.go
+++ b/internal/cmd/plugin/psql/psql_test.go
@@ -95,6 +95,8 @@ var _ = Describe("psql launcher", func() {
"cluster-example-1",
"--",
"psql",
+ "-U",
+ "postgres",
))
})
@@ -120,6 +122,8 @@ var _ = Describe("psql launcher", func() {
"cluster-example-1",
"--",
"psql",
+ "-U",
+ "postgres",
"-c",
"select 1",
))
From ddd90b9e3f4cc824e8494d3424947e84c13e5e5a Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Tue, 3 Dec 2024 19:50:10 +0100
Subject: [PATCH 188/836] fix(pooler): set libpq environment variables in
PgBouncer pods (#6247)
This patch configures the following environment variables in PgBouncer
pods:
- `PGUSER`
- `PGDATABASE`
- `PGHOST`
- `PSQL_HISTORY`
These variables enable seamless access to the PgBouncer administrative
interface by allowing `psql` to connect directly from within the Pod
without requiring additional command-line options.
Fixes: #6242
Signed-off-by: Leonardo Cecchi
---
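For illustration, the `pgbouncer` container in the generated Deployment ends up with
environment entries along these lines (a sketch; the exact `PSQL_HISTORY` path depends
on `postgres.TemporaryDirectory`, assumed here to be `/controller/tmp`):
```yaml
env:
  - name: PGUSER
    value: pgbouncer
  - name: PGDATABASE
    value: pgbouncer
  - name: PGHOST
    value: /controller/run
  - name: PSQL_HISTORY
    # path.Join(postgres.TemporaryDirectory, ".psql_history");
    # the /controller/tmp prefix is an assumption
    value: /controller/tmp/.psql_history
```
With these in place, invoking `psql` inside the Pod connects straight to the PgBouncer
administrative database, so commands such as `SHOW HELP` work without extra flags
(as exercised by the new e2e test).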
internal/cmd/manager/pgbouncer/cmd.go | 5 ++++
pkg/specs/pgbouncer/deployments.go | 9 +++++++
tests/e2e/pgbouncer_test.go | 35 +++++++++++++++++++++++++++
3 files changed, 49 insertions(+)
diff --git a/internal/cmd/manager/pgbouncer/cmd.go b/internal/cmd/manager/pgbouncer/cmd.go
index 138963dcfb..a511619112 100644
--- a/internal/cmd/manager/pgbouncer/cmd.go
+++ b/internal/cmd/manager/pgbouncer/cmd.go
@@ -19,10 +19,12 @@ package pgbouncer
import (
"fmt"
+ "os"
"github.com/spf13/cobra"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/pgbouncer/run"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
)
// NewCmd creates the "instance" command
@@ -31,6 +33,9 @@ func NewCmd() *cobra.Command {
Use: "pgbouncer",
Short: "pgbouncer management subfeatures",
SilenceErrors: true,
+ PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+ return os.MkdirAll(postgres.TemporaryDirectory, 0o1777) //nolint:gosec
+ },
RunE: func(_ *cobra.Command, _ []string) error {
return fmt.Errorf("missing subcommand")
},
diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go
index 12fe9e0931..2f5d639502 100644
--- a/pkg/specs/pgbouncer/deployments.go
+++ b/pkg/specs/pgbouncer/deployments.go
@@ -19,6 +19,8 @@ limitations under the License.
package pgbouncer
import (
+ "path"
+
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -108,6 +110,13 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen
}, true).
WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "NAMESPACE", Value: pooler.Namespace}, true).
WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "POOLER_NAME", Value: pooler.Name}, true).
+ WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGUSER", Value: "pgbouncer"}, false).
+ WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGDATABASE", Value: "pgbouncer"}, false).
+ WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGHOST", Value: "/controller/run"}, false).
+ WithContainerEnv("pgbouncer", corev1.EnvVar{
+ Name: "PSQL_HISTORY",
+ Value: path.Join(postgres.TemporaryDirectory, ".psql_history"),
+ }, false).
WithContainerSecurityContext("pgbouncer", specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), true).
WithServiceAccountName(pooler.Name, true).
WithReadinessProbe("pgbouncer", &corev1.Probe{
diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go
index 3e7f9542f1..d07734c106 100644
--- a/tests/e2e/pgbouncer_test.go
+++ b/tests/e2e/pgbouncer_test.go
@@ -17,6 +17,10 @@ limitations under the License.
package e2e
import (
+ corev1 "k8s.io/api/core/v1"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
. "github.com/onsi/ginkgo/v2"
@@ -75,6 +79,16 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity),
assertReadWriteConnectionUsingPgBouncerService(namespace, clusterName,
poolerBasicAuthROSampleFile, false)
})
+
+ By("executing psql within the pgbouncer pod", func() {
+ pod, err := getPgbouncerPod(poolerBasicAuthRWSampleFile)
+ Expect(err).ToNot(HaveOccurred())
+
+ GinkgoWriter.Println(pod.Name)
+
+ err = runShowHelpInPod(pod)
+ Expect(err).ToNot(HaveOccurred())
+ })
})
It("can connect to Postgres via pgbouncer service using tls certificates", func() {
@@ -176,3 +190,24 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity),
})
})
})
+
+func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) {
+ poolerKey, err := env.GetResourceNamespacedNameFromYAML(sampleFile)
+ if err != nil {
+ return nil, err
+ }
+
+ Expect(err).ToNot(HaveOccurred())
+
+ var podList corev1.PodList
+ err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(poolerKey.Namespace),
+ ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey.Name})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(podList.Items)).Should(BeEquivalentTo(1))
+ return &podList.Items[0], nil
+}
+
+func runShowHelpInPod(pod *corev1.Pod) error {
+ _, _, err := env.ExecCommand(env.Ctx, *pod, "pgbouncer", nil, "psql", "-c", "SHOW HELP")
+ return err
+}
From 840920dafad32c083dffe6031880e34bce59feb7 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Wed, 4 Dec 2024 11:17:54 +0100
Subject: [PATCH 189/836] feat(initdb): add support for ICU and built-in locale
providers (#6220)
This patch enhances the PostgreSQL database initialization process
(`initdb`) by introducing support for the ICU locale provider (available
since PostgreSQL 16) and the built-in locale provider (available since
PostgreSQL 17). Users can now specify the desired locale provider when
initializing a new PostgreSQL cluster, offering improved localization
flexibility.
Closes #5386
Signed-off-by: Marco Nenciarini
Signed-off-by: Gabriele Bartolini
Signed-off-by: Tao Li
Co-authored-by: Gabriele Bartolini
Co-authored-by: Tao Li
---
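The sample shipped with this patch covers only the ICU provider; a companion sketch for
the built-in provider could look as follows (assumes a PostgreSQL 17 image; the resource
name and the chosen locale are illustrative):
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example-initdb-builtin
spec:
  instances: 3

  bootstrap:
    initdb:
      encoding: UTF8
      localeProvider: builtin
      # builtinLocale is accepted only when localeProvider is `builtin`,
      # as enforced by the CEL validation rules added by this patch
      builtinLocale: C.UTF-8

  storage:
    size: 1Gi
```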
.wordlist-en-custom.txt | 4 +
api/v1/cluster_types.go | 30 ++++++++
.../bases/postgresql.cnpg.io_clusters.yaml | 37 +++++++++
docs/src/bootstrap.md | 75 ++++++++++++++++---
docs/src/cloudnative-pg.v1.md | 42 +++++++++++
.../samples/cluster-example-initdb-icu.yaml | 19 +++++
pkg/specs/jobs.go | 15 ++++
pkg/specs/jobs_test.go | 41 ++++++++--
8 files changed, 244 insertions(+), 19 deletions(-)
create mode 100644 docs/src/samples/cluster-example-initdb-icu.yaml
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 912dc3d759..8cf809cd19 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -601,6 +601,7 @@ bootstrapinitdb
bootstraprecovery
br
bs
+builtinLocale
bw
byStatus
bypassrls
@@ -833,6 +834,8 @@ httpGet
https
hugepages
icu
+icuLocale
+icuRules
ident
imageCatalogRef
imageName
@@ -921,6 +924,7 @@ livenessProbeTimeout
lm
localeCType
localeCollate
+localeProvider
localhost
localobjectreference
locktype
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index 9bb1edef5c..affbfe0e60 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -1428,6 +1428,9 @@ type CertificatesStatus struct {
// BootstrapInitDB is the configuration of the bootstrap process when
// initdb is used
// Refer to the Bootstrap page of the documentation for more information.
+// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`"
+// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`"
+// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`"
type BootstrapInitDB struct {
// Name of the database used by the application. Default: `app`.
// +optional
@@ -1468,6 +1471,33 @@ type BootstrapInitDB struct {
// +optional
LocaleCType string `json:"localeCType,omitempty"`
+ // Sets the default collation order and character classification in the new database.
+ // +optional
+ Locale string `json:"locale,omitempty"`
+
+ // This option sets the locale provider for databases created in the new cluster.
+ // Available from PostgreSQL 16.
+ // +optional
+ LocaleProvider string `json:"localeProvider,omitempty"`
+
+ // Specifies the ICU locale when the ICU provider is used.
+ // This option requires `localeProvider` to be set to `icu`.
+ // Available from PostgreSQL 15.
+ // +optional
+ IcuLocale string `json:"icuLocale,omitempty"`
+
+ // Specifies additional collation rules to customize the behavior of the default collation.
+ // This option requires `localeProvider` to be set to `icu`.
+ // Available from PostgreSQL 16.
+ // +optional
+ IcuRules string `json:"icuRules,omitempty"`
+
+ // Specifies the locale name when the builtin provider is used.
+ // This option requires `localeProvider` to be set to `builtin`.
+ // Available from PostgreSQL 17.
+ // +optional
+ BuiltinLocale string `json:"builtinLocale,omitempty"`
+
// The value in megabytes (1 to 1024) to be passed to the `--wal-segsize`
// option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
// +kubebuilder:validation:Minimum=1
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 4316b14329..17242a5ec7 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -1495,6 +1495,12 @@ spec:
initdb:
description: Bootstrap the cluster via initdb
properties:
+ builtinLocale:
+ description: |-
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
dataChecksums:
description: |-
Whether the `-k` option should be passed to initdb,
@@ -1508,6 +1514,18 @@ spec:
description: The value to be passed as option `--encoding`
for initdb (default:`UTF8`)
type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
+ type: string
import:
description: |-
Bootstraps the new cluster by importing data from an existing PostgreSQL
@@ -1576,6 +1594,10 @@ spec:
- source
- type
type: object
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
localeCType:
description: The value to be passed as option `--lc-ctype`
for initdb (default:`C`)
@@ -1584,6 +1606,11 @@ spec:
description: The value to be passed as option `--lc-collate`
for initdb (default:`C`)
type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
options:
description: |-
The list of options that must be passed to initdb when creating the cluster.
@@ -1789,6 +1816,16 @@ spec:
minimum: 1
type: integer
type: object
+ x-kubernetes-validations:
+ - message: builtinLocale is only available when localeProvider
+ is set to `builtin`
+ rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin'''
+ - message: icuLocale is only available when localeProvider is
+ set to `icu`
+ rule: '!has(self.icuLocale) || self.localeProvider == ''icu'''
+ - message: icuRules is only available when localeProvider is set
+ to `icu`
+ rule: '!has(self.icuRules) || self.localeProvider == ''icu'''
pg_basebackup:
description: |-
Bootstrap the cluster taking a physical backup of another compatible
diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md
index 6aff83a8a6..2a0518f67c 100644
--- a/docs/src/bootstrap.md
+++ b/docs/src/bootstrap.md
@@ -24,7 +24,7 @@ For more detailed information about this feature, please refer to the
CloudNativePG requires both the `postgres` user and database to
always exists. Using the local Unix Domain Socket, it needs to connect
as `postgres` user to the `postgres` database via `peer` authentication in
- order to perform administrative tasks on the cluster.
+ order to perform administrative tasks on the cluster.
**DO NOT DELETE** the `postgres` user or the `postgres` database!!!
!!! Info
@@ -204,36 +204,87 @@ The user that owns the database defaults to the database name instead.
The application user is not used internally by the operator, which instead
relies on the superuser to reconcile the cluster with the desired status.
-### Passing options to `initdb`
+### Passing Options to `initdb`
-The actual PostgreSQL data directory is created via an invocation of the
-`initdb` PostgreSQL command. If you need to add custom options to that command
-(i.e., to change the `locale` used for the template databases or to add data
-checksums), you can use the following parameters:
+The PostgreSQL data directory is initialized using the
+[`initdb` PostgreSQL command](https://www.postgresql.org/docs/current/app-initdb.html).
+
+CloudNativePG enables you to customize the behavior of `initdb` to modify
+settings such as default locale configurations and data checksums.
+
+!!! Warning
+ CloudNativePG acts only as a direct proxy to `initdb` for locale-related
+ options, due to the ongoing and significant enhancements in PostgreSQL's locale
+ support. It is your responsibility to ensure that the correct options are
+ provided, following the PostgreSQL documentation, and to verify that the
+ bootstrap process completes successfully.
+
+To include custom options in the `initdb` command, you can use the following
+parameters:
+
+builtinLocale
+: When `builtinLocale` is set to a value, CloudNativePG passes it to the
+ `--builtin-locale` option in `initdb`. This option controls the builtin locale, as
+ defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html)
+ from the PostgreSQL documentation (default: empty). Note that this option requires
+ `localeProvider` to be set to `builtin`. Available from PostgreSQL 17.
dataChecksums
-: When `dataChecksums` is set to `true`, CNPG invokes the `-k` option in
+: When `dataChecksums` is set to `true`, CloudNativePG invokes the `-k` option in
`initdb` to enable checksums on data pages and help detect corruption by the
I/O system - that would otherwise be silent (default: `false`).
encoding
-: When `encoding` set to a value, CNPG passes it to the `--encoding` option in `initdb`,
- which selects the encoding of the template database (default: `UTF8`).
+: When `encoding` is set to a value, CloudNativePG passes it to the `--encoding`
+ option in `initdb`, which selects the encoding of the template database
+ (default: `UTF8`).
+
+icuLocale
+: When `icuLocale` is set to a value, CloudNativePG passes it to the
+ `--icu-locale` option in `initdb`. This option controls the ICU locale, as
+ defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html)
+ from the PostgreSQL documentation (default: empty).
+ Note that this option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+
+icuRules
+: When `icuRules` is set to a value, CloudNativePG passes it to the
+ `--icu-rules` option in `initdb`. This option specifies additional collation
+ rules to customize the behavior of the default collation, as defined in
+ ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from
+ the PostgreSQL documentation (default: empty). Note that this option requires
+ `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
+
+locale
+: When `locale` is set to a value, CloudNativePG passes it to the `--locale`
+ option in `initdb`. This option controls the locale, as defined in
+ ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from
+ the PostgreSQL documentation. By default, the locale parameter is empty. In
+ this case, environment variables such as `LANG` are used to determine the
+ locale. Be aware that these variables can vary between container images,
+ potentially leading to inconsistent behavior.
localeCollate
-: When `localeCollate` is set to a value, CNPG passes it to the `--lc-collate`
+: When `localeCollate` is set to a value, CloudNativePG passes it to the `--lc-collate`
option in `initdb`. This option controls the collation order (`LC_COLLATE`
subcategory), as defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html)
from the PostgreSQL documentation (default: `C`).
localeCType
-: When `localeCType` is set to a value, CNPG passes it to the `--lc-ctype` option in
+: When `localeCType` is set to a value, CloudNativePG passes it to the `--lc-ctype` option in
`initdb`. This option controls the collation order (`LC_CTYPE` subcategory), as
defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html)
from the PostgreSQL documentation (default: `C`).
+localeProvider
+: When `localeProvider` is set to a value, CloudNativePG passes it to the `--locale-provider`
+option in `initdb`. This option controls the locale provider, as defined in
+["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the
+PostgreSQL documentation (default: empty, which means `libc` for PostgreSQL).
+Available from PostgreSQL 15.
+
walSegmentSize
-: When `walSegmentSize` is set to a value, CNPG passes it to the `--wal-segsize`
+: When `walSegmentSize` is set to a value, CloudNativePG passes it to the `--wal-segsize`
option in `initdb` (default: not set - defined by PostgreSQL as 16 megabytes).
!!! Note
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 662b01af57..ea07a0bed8 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -1079,6 +1079,48 @@ enabling checksums on data pages (default: false)
The value to be passed as option --lc-ctype for initdb (default:C)
+locale
+string
+
+
+ Sets the default collation order and character classification in the new database.
+
+
+localeProvider
+string
+
+
+ This option sets the locale provider for databases created in the new cluster.
+Available from PostgreSQL 16.
+
+
+icuLocale
+string
+
+
+ Specifies the ICU locale when the ICU provider is used.
+This option requires localeProvider to be set to icu.
+Available from PostgreSQL 15.
+
+
+icuRules
+string
+
+
+ Specifies additional collation rules to customize the behavior of the default collation.
+This option requires localeProvider to be set to icu.
+Available from PostgreSQL 16.
+
+
+builtinLocale
+string
+
+
+ Specifies the locale name when the builtin provider is used.
+This option requires localeProvider to be set to builtin.
+Available from PostgreSQL 17.
+
+
walSegmentSize
int
diff --git a/docs/src/samples/cluster-example-initdb-icu.yaml b/docs/src/samples/cluster-example-initdb-icu.yaml
new file mode 100644
index 0000000000..3e9747effe
--- /dev/null
+++ b/docs/src/samples/cluster-example-initdb-icu.yaml
@@ -0,0 +1,19 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example-initdb-icu
+spec:
+ instances: 3
+
+ bootstrap:
+ initdb:
+ encoding: UTF8
+ localeCollate: en_US.UTF8
+ localeCType: en_US.UTF8
+ localeProvider: icu
+ icuLocale: en-US
+ # we want to order g and G after A (and before b)
+ icuRules: '&A < g <<< G'
+
+ storage:
+ size: 1Gi
diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go
index 3ccdd8c0d0..bbd5cd936c 100644
--- a/pkg/specs/jobs.go
+++ b/pkg/specs/jobs.go
@@ -142,6 +142,21 @@ func buildInitDBFlags(cluster apiv1.Cluster) (initCommand []string) {
if localeCType := config.LocaleCType; localeCType != "" {
options = append(options, fmt.Sprintf("--lc-ctype=%s", localeCType))
}
+ if locale := config.Locale; locale != "" {
+ options = append(options, fmt.Sprintf("--locale=%s", locale))
+ }
+ if localeProvider := config.LocaleProvider; localeProvider != "" {
+ options = append(options, fmt.Sprintf("--locale-provider=%s", localeProvider))
+ }
+ if icuLocale := config.IcuLocale; icuLocale != "" {
+ options = append(options, fmt.Sprintf("--icu-locale=%s", icuLocale))
+ }
+ if icuRules := config.IcuRules; icuRules != "" {
+ options = append(options, fmt.Sprintf("--icu-rules=%s", icuRules))
+ }
+ if builtinLocale := config.BuiltinLocale; builtinLocale != "" {
+ options = append(options, fmt.Sprintf("--builtin-locale=%s", builtinLocale))
+ }
if walSegmentSize := config.WalSegmentSize; walSegmentSize != 0 && utils.IsPowerOfTwo(walSegmentSize) {
options = append(options, fmt.Sprintf("--wal-segsize=%v", walSegmentSize))
}
diff --git a/pkg/specs/jobs_test.go b/pkg/specs/jobs_test.go
index 5d9c7eab32..378f1ae813 100644
--- a/pkg/specs/jobs_test.go
+++ b/pkg/specs/jobs_test.go
@@ -17,7 +17,9 @@ limitations under the License.
package specs
import (
- v1 "k8s.io/api/batch/v1"
+ "slices"
+
+ batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,8 +39,8 @@ var _ = Describe("Barman endpoint CA", func() {
},
}
- job := v1.Job{
- Spec: v1.JobSpec{
+ job := batchv1.Job{
+ Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{{}},
@@ -80,8 +82,8 @@ var _ = Describe("Barman endpoint CA", func() {
},
}
- job := v1.Job{
- Spec: v1.JobSpec{
+ job := batchv1.Job{
+ Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -118,8 +120,8 @@ var _ = Describe("Barman endpoint CA", func() {
},
}}
- job := v1.Job{
- Spec: v1.JobSpec{
+ job := batchv1.Job{
+ Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
@@ -165,4 +167,29 @@ var _ = Describe("Job created via InitDB", func() {
Expect(job.Spec.Template.Spec.Containers[0].Command).Should(ContainElement(
postInitApplicationSQLRefsFolder.toString()))
})
+
+ It("contains icu configuration", func() {
+ cluster := apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
+ Encoding: "UTF-8",
+ LocaleProvider: "icu",
+ IcuLocale: "und",
+ IcuRules: "&A < z <<< Z",
+ },
+ },
+ },
+ }
+ job := CreatePrimaryJobViaInitdb(cluster, 0)
+
+ jobCommand := job.Spec.Template.Spec.Containers[0].Command
+ Expect(jobCommand).Should(ContainElement("--initdb-flags"))
+ initdbFlags := jobCommand[slices.Index(jobCommand, "--initdb-flags")+1]
+ Expect(initdbFlags).Should(ContainSubstring("--encoding=UTF-8"))
+ Expect(initdbFlags).Should(ContainSubstring("--locale-provider=icu"))
+ Expect(initdbFlags).Should(ContainSubstring("--icu-locale=und"))
+ Expect(initdbFlags).ShouldNot(ContainSubstring("--locale="))
+ Expect(initdbFlags).Should(ContainSubstring("'--icu-rules=&A < z <<< Z'"))
+ })
})
From 0cdb7268c113bcf0e9a30079df9d445e6f9a172b Mon Sep 17 00:00:00 2001
From: Jack Langston <13301098+fullykubed@users.noreply.github.com>
Date: Wed, 4 Dec 2024 07:15:30 -0500
Subject: [PATCH 190/836] feat(pooler): expand configurable options for
PgBouncer (#6216)
This commit extends the `Pooler` API by supporting additional PgBouncer
configuration parameters. The newly supported parameters are:
- `cancel_wait_timeout`
- `dns_max_ttl`
- `dns_nxdomain_ttl`
- `listen_backlog`
- `max_packet_size`
- `pkt_buf`
- `sbuf_loopcnt`
- `server_tls_ciphers`
- `server_tls_protocols`
- `suspend_timeout`
- `tcp_defer_accept`
- `tcp_socket_buffer`
- `track_extra_parameters`
These additions provide greater flexibility and control over PgBouncer's
behavior, catering to a broader range of use cases and
deployment scenarios.
Closes #5276
Signed-off-by: Jack Langston
Signed-off-by: Gabriele Bartolini
Co-authored-by: Jack Langston
Co-authored-by: Gabriele Bartolini
---
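As a sketch of how the newly allowed parameters are consumed (pooler and cluster names
are illustrative, and the values are examples rather than tuning advice):
```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example
  instances: 1
  type: rw
  pgbouncer:
    poolMode: session
    parameters:
      # values are passed through verbatim to pgbouncer.ini
      cancel_wait_timeout: "10"
      dns_max_ttl: "15"
      server_tls_protocols: "tlsv1.3"
      tcp_socket_buffer: "65536"
```
Anything outside `AllowedPgbouncerGenericConfigurationParameters` is still rejected by
the `Pooler` validating webhook.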
api/v1/pooler_webhook.go | 13 +++++++++++++
docs/src/connection_pooling.md | 15 ++++++++++++++-
docs/src/samples/pooler-tls.yaml | 2 ++
3 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/api/v1/pooler_webhook.go b/api/v1/pooler_webhook.go
index 24241a836a..b86ac2622b 100644
--- a/api/v1/pooler_webhook.go
+++ b/api/v1/pooler_webhook.go
@@ -38,25 +38,32 @@ var (
AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{
"application_name_add_host",
"autodb_idle_timeout",
+ "cancel_wait_timeout",
"client_idle_timeout",
"client_login_timeout",
"default_pool_size",
"disable_pqexec",
+ "dns_max_ttl",
+ "dns_nxdomain_ttl",
"idle_transaction_timeout",
"ignore_startup_parameters",
+ "listen_backlog",
"log_connections",
"log_disconnections",
"log_pooler_errors",
"log_stats",
"max_client_conn",
"max_db_connections",
+ "max_packet_size",
"max_prepared_statements",
"max_user_connections",
"min_pool_size",
+ "pkt_buf",
"query_timeout",
"query_wait_timeout",
"reserve_pool_size",
"reserve_pool_timeout",
+ "sbuf_loopcnt",
"server_check_delay",
"server_check_query",
"server_connect_timeout",
@@ -67,12 +74,18 @@ var (
"server_reset_query",
"server_reset_query_always",
"server_round_robin",
+ "server_tls_ciphers",
+ "server_tls_protocols",
"stats_period",
+ "suspend_timeout",
+ "tcp_defer_accept",
+ "tcp_socket_buffer",
"tcp_keepalive",
"tcp_keepcnt",
"tcp_keepidle",
"tcp_keepintvl",
"tcp_user_timeout",
+ "track_extra_parameters",
"verbose",
})
)
diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md
index cf6b9bc310..0ac9d50bfe 100644
--- a/docs/src/connection_pooling.md
+++ b/docs/src/connection_pooling.md
@@ -331,13 +331,17 @@ are the ones directly set by PgBouncer.
- [`application_name_add_host`](https://www.pgbouncer.org/config.html#application_name_add_host)
- [`autodb_idle_timeout`](https://www.pgbouncer.org/config.html#autodb_idle_timeout)
+- [`cancel_wait_timeout`](https://www.pgbouncer.org/config.html#cancel_wait_timeout)
- [`client_idle_timeout`](https://www.pgbouncer.org/config.html#client_idle_timeout)
- [`client_login_timeout`](https://www.pgbouncer.org/config.html#client_login_timeout)
- [`default_pool_size`](https://www.pgbouncer.org/config.html#default_pool_size)
- [`disable_pqexec`](https://www.pgbouncer.org/config.html#disable_pqexec)
+- [`dns_max_ttl`](https://www.pgbouncer.org/config.html#dns_max_ttl)
+- [`dns_nxdomain_ttl`](https://www.pgbouncer.org/config.html#dns_nxdomain_ttl)
- [`idle_transaction_timeout`](https://www.pgbouncer.org/config.html#idle_transaction_timeout)
- [`ignore_startup_parameters`](https://www.pgbouncer.org/config.html#ignore_startup_parameters):
- to be appended to `extra_float_digits,options` - required by CNP
+ to be appended to `extra_float_digits,options` - required by CloudNativePG
+- [`listen_backlog`](https://www.pgbouncer.org/config.html#listen_backlog)
- [`log_connections`](https://www.pgbouncer.org/config.html#log_connections)
- [`log_disconnections`](https://www.pgbouncer.org/config.html#log_disconnections)
- [`log_pooler_errors`](https://www.pgbouncer.org/config.html#log_pooler_errors)
@@ -346,13 +350,16 @@ are the ones directly set by PgBouncer.
export as described in the ["Monitoring"](#monitoring) section below
- [`max_client_conn`](https://www.pgbouncer.org/config.html#max_client_conn)
- [`max_db_connections`](https://www.pgbouncer.org/config.html#max_db_connections)
+- [`max_packet_size`](https://www.pgbouncer.org/config.html#max_packet_size)
- [`max_prepared_statements`](https://www.pgbouncer.org/config.html#max_prepared_statements)
- [`max_user_connections`](https://www.pgbouncer.org/config.html#max_user_connections)
- [`min_pool_size`](https://www.pgbouncer.org/config.html#min_pool_size)
+- [`pkt_buf`](https://www.pgbouncer.org/config.html#pkt_buf)
- [`query_timeout`](https://www.pgbouncer.org/config.html#query_timeout)
- [`query_wait_timeout`](https://www.pgbouncer.org/config.html#query_wait_timeout)
- [`reserve_pool_size`](https://www.pgbouncer.org/config.html#reserve_pool_size)
- [`reserve_pool_timeout`](https://www.pgbouncer.org/config.html#reserve_pool_timeout)
+- [`sbuf_loopcnt`](https://www.pgbouncer.org/config.html#sbuf_loopcnt)
- [`server_check_delay`](https://www.pgbouncer.org/config.html#server_check_delay)
- [`server_check_query`](https://www.pgbouncer.org/config.html#server_check_query)
- [`server_connect_timeout`](https://www.pgbouncer.org/config.html#server_connect_timeout)
@@ -363,12 +370,18 @@ are the ones directly set by PgBouncer.
- [`server_reset_query`](https://www.pgbouncer.org/config.html#server_reset_query)
- [`server_reset_query_always`](https://www.pgbouncer.org/config.html#server_reset_query_always)
- [`server_round_robin`](https://www.pgbouncer.org/config.html#server_round_robin)
+- [`server_tls_ciphers`](https://www.pgbouncer.org/config.html#server_tls_ciphers)
+- [`server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols)
- [`stats_period`](https://www.pgbouncer.org/config.html#stats_period)
+- [`suspend_timeout`](https://www.pgbouncer.org/config.html#suspend_timeout)
+- [`tcp_defer_accept`](https://www.pgbouncer.org/config.html#tcp_defer_accept)
- [`tcp_keepalive`](https://www.pgbouncer.org/config.html#tcp_keepalive)
- [`tcp_keepcnt`](https://www.pgbouncer.org/config.html#tcp_keepcnt)
- [`tcp_keepidle`](https://www.pgbouncer.org/config.html#tcp_keepidle)
- [`tcp_keepintvl`](https://www.pgbouncer.org/config.html#tcp_keepintvl)
- [`tcp_user_timeout`](https://www.pgbouncer.org/config.html#tcp_user_timeout)
+- [`tcp_socket_buffer`](https://www.pgbouncer.org/config.html#tcp_socket_buffer)
+- [`track_extra_parameters`](https://www.pgbouncer.org/config.html#track_extra_parameters)
- [`verbose`](https://www.pgbouncer.org/config.html#verbose)
Customizations of the PgBouncer configuration are written declaratively in the
diff --git a/docs/src/samples/pooler-tls.yaml b/docs/src/samples/pooler-tls.yaml
index 9b58b2d364..20bffa1115 100644
--- a/docs/src/samples/pooler-tls.yaml
+++ b/docs/src/samples/pooler-tls.yaml
@@ -10,3 +10,5 @@ spec:
type: rw
pgbouncer:
poolMode: session
+ parameters:
+ server_tls_protocols: tlsv1.3
From 0f153716c403805e2530c0e43585a526c59ce68b Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Wed, 4 Dec 2024 13:27:53 +0100
Subject: [PATCH 191/836] fix
Signed-off-by: Gabriele Bartolini
---
internal/cmd/plugin/backup/cmd.go | 4 ++--
internal/cmd/plugin/destroy/cmd.go | 4 ++--
internal/cmd/plugin/fence/cmd.go | 8 ++++----
internal/cmd/plugin/hibernate/cmd.go | 12 ++++++------
internal/cmd/plugin/maintenance/cmd.go | 4 ++--
internal/cmd/plugin/pgbench/cmd.go | 2 +-
internal/cmd/plugin/pgbench/cmd_test.go | 2 +-
internal/cmd/plugin/promote/cmd.go | 4 ++--
internal/cmd/plugin/psql/cmd.go | 2 +-
internal/cmd/plugin/reload/cmd.go | 2 +-
internal/cmd/plugin/status/cmd.go | 2 +-
11 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go
index 17aceabe31..af8bbf8b3d 100644
--- a/internal/cmd/plugin/backup/cmd.go
+++ b/internal/cmd/plugin/backup/cmd.go
@@ -72,7 +72,7 @@ func NewCmd() *cobra.Command {
}
backupSubcommand := &cobra.Command{
- Use: "backup [cluster]",
+ Use: "backup CLUSTER",
Short: "Request an on-demand backup for a PostgreSQL Cluster",
GroupID: plugin.GroupIDDatabase,
Args: plugin.RequiresArguments(1),
@@ -167,7 +167,7 @@ func NewCmd() *cobra.Command {
"backup-name",
"",
"The name of the Backup resource that will be created, "+
- "defaults to \"[cluster]-[current_timestamp]\"",
+ "defaults to \"CLUSTER-CURRENT_TIMESTAMP\"",
)
backupSubcommand.Flags().StringVarP(
&backupTarget,
diff --git a/internal/cmd/plugin/destroy/cmd.go b/internal/cmd/plugin/destroy/cmd.go
index c3f4e7c944..a1bf87b665 100644
--- a/internal/cmd/plugin/destroy/cmd.go
+++ b/internal/cmd/plugin/destroy/cmd.go
@@ -29,8 +29,8 @@ import (
// NewCmd create the new "destroy" subcommand
func NewCmd() *cobra.Command {
destroyCmd := &cobra.Command{
- Use: "destroy [cluster] [node]",
- Short: "Destroy the instance named [cluster]-[node] or [node] with the associated PVC",
+ Use: "destroy CLUSTER INSTANCE",
+ Short: "Destroy the instance named CLUSTER-INSTANCE with the associated PVC",
GroupID: plugin.GroupIDCluster,
Args: plugin.RequiresArguments(2),
RunE: func(cmd *cobra.Command, args []string) error {
diff --git a/internal/cmd/plugin/fence/cmd.go b/internal/cmd/plugin/fence/cmd.go
index 8b3e719936..ab7bd6b8f7 100644
--- a/internal/cmd/plugin/fence/cmd.go
+++ b/internal/cmd/plugin/fence/cmd.go
@@ -27,8 +27,8 @@ import (
var (
fenceOnCmd = &cobra.Command{
- Use: "on [cluster] [node]",
- Short: `Fence an instance named [cluster]-[node] or [node]`,
+ Use: "on CLUSTER INSTANCE",
+ Short: `Fence an instance named CLUSTER-INSTANCE`,
Args: plugin.RequiresArguments(2),
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
@@ -42,8 +42,8 @@ var (
}
fenceOffCmd = &cobra.Command{
- Use: "off [cluster] [node]",
- Short: `Remove fence for an instance named [cluster]-[node] or [node]`,
+ Use: "off CLUSTER INSTANCE",
+ Short: `Remove fence for an instance named CLUSTER-INSTANCE`,
Args: plugin.RequiresArguments(2),
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go
index 44f6c32a4c..134c5412ff 100644
--- a/internal/cmd/plugin/hibernate/cmd.go
+++ b/internal/cmd/plugin/hibernate/cmd.go
@@ -26,8 +26,8 @@ import (
var (
hibernateOnCmd = &cobra.Command{
- Use: "on [cluster]",
- Short: "Hibernates the cluster named [cluster]",
+ Use: "on CLUSTER",
+ Short: "Hibernates the cluster named CLUSTER",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
@@ -49,8 +49,8 @@ var (
}
hibernateOffCmd = &cobra.Command{
- Use: "off [cluster]",
- Short: "Bring the cluster named [cluster] back from hibernation",
+ Use: "off CLUSTER",
+ Short: "Bring the cluster named CLUSTER back from hibernation",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
@@ -63,8 +63,8 @@ var (
}
hibernateStatusCmd = &cobra.Command{
- Use: "status [cluster]",
- Short: "Prints the hibernation status for the [cluster]",
+ Use: "status CLUSTER",
+ Short: "Prints the hibernation status for the CLUSTER",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
diff --git a/internal/cmd/plugin/maintenance/cmd.go b/internal/cmd/plugin/maintenance/cmd.go
index 39064884d6..39a9d735da 100644
--- a/internal/cmd/plugin/maintenance/cmd.go
+++ b/internal/cmd/plugin/maintenance/cmd.go
@@ -37,7 +37,7 @@ func NewCmd() *cobra.Command {
}
maintenanceCmd.AddCommand(&cobra.Command{
- Use: "set [cluster]",
+ Use: "set CLUSTER",
Short: "Sets maintenance mode",
Long: "This command will set maintenance mode on a single cluster or on all clusters " +
"in the current namespace if not specified differently through flags",
@@ -58,7 +58,7 @@ func NewCmd() *cobra.Command {
})
maintenanceCmd.AddCommand(&cobra.Command{
- Use: "unset [cluster]",
+ Use: "unset CLUSTER",
Short: "Removes maintenance mode",
Long: "This command will unset maintenance mode on a single cluster or on all clusters " +
"in the current namespace if not specified differently through flags",
diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go
index 7f79ed4c4a..394b238546 100644
--- a/internal/cmd/plugin/pgbench/cmd.go
+++ b/internal/cmd/plugin/pgbench/cmd.go
@@ -29,7 +29,7 @@ func NewCmd() *cobra.Command {
run := &pgBenchRun{}
pgBenchCmd := &cobra.Command{
- Use: "pgbench [cluster] [-- pgBenchCommandArgs...]",
+ Use: "pgbench CLUSTER [-- PGBENCH_COMMAND_ARGS...]",
Short: "Creates a pgbench job",
Args: validateCommandArgs,
Long: "Creates a pgbench job to run against the specified Postgres Cluster.",
diff --git a/internal/cmd/plugin/pgbench/cmd_test.go b/internal/cmd/plugin/pgbench/cmd_test.go
index 68ec5ebf6f..b1e539d24f 100644
--- a/internal/cmd/plugin/pgbench/cmd_test.go
+++ b/internal/cmd/plugin/pgbench/cmd_test.go
@@ -27,7 +27,7 @@ var _ = Describe("NewCmd", func() {
It("should create a cobra.Command with correct defaults", func() {
cmd := NewCmd()
- Expect(cmd.Use).To(Equal("pgbench [cluster] [-- pgBenchCommandArgs...]"))
+ Expect(cmd.Use).To(Equal("pgbench CLUSTER [-- PGBENCH_COMMAND_ARGS...]"))
Expect(cmd.Short).To(Equal("Creates a pgbench job"))
Expect(cmd.Long).To(Equal("Creates a pgbench job to run against the specified Postgres Cluster."))
Expect(cmd.Example).To(Equal(jobExample))
diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go
index f4f3c95d88..401e75d949 100644
--- a/internal/cmd/plugin/promote/cmd.go
+++ b/internal/cmd/plugin/promote/cmd.go
@@ -29,8 +29,8 @@ import (
// NewCmd create the new "promote" subcommand
func NewCmd() *cobra.Command {
promoteCmd := &cobra.Command{
- Use: "promote [cluster] [node]",
- Short: "Promote the pod named [cluster]-[node] or [node] to primary",
+ Use: "promote CLUSTER INSTANCE",
+ Short: "Promote the pod named CLUSTER-INSTANCE to primary",
GroupID: plugin.GroupIDCluster,
Args: plugin.RequiresArguments(2),
RunE: func(_ *cobra.Command, args []string) error {
diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go
index 8cae04a0ea..6a2bfb6cf1 100644
--- a/internal/cmd/plugin/psql/cmd.go
+++ b/internal/cmd/plugin/psql/cmd.go
@@ -31,7 +31,7 @@ func NewCmd() *cobra.Command {
var passStdin bool
cmd := &cobra.Command{
- Use: "psql [cluster] [-- psqlArgs...]",
+ Use: "psql CLUSTER [-- PSQL_ARGS...]",
Short: "Start a psql session targeting a CloudNativePG cluster",
Args: validatePsqlArgs,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
diff --git a/internal/cmd/plugin/reload/cmd.go b/internal/cmd/plugin/reload/cmd.go
index f31aa3ddef..833e1eae2b 100644
--- a/internal/cmd/plugin/reload/cmd.go
+++ b/internal/cmd/plugin/reload/cmd.go
@@ -27,7 +27,7 @@ import (
// NewCmd creates the new "reset" command
func NewCmd() *cobra.Command {
restartCmd := &cobra.Command{
- Use: "reload [clusterName]",
+ Use: "reload CLUSTER",
Short: `Reload a cluster`,
Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`,
GroupID: plugin.GroupIDCluster,
diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go
index 4ddbc5db02..385f50e66f 100644
--- a/internal/cmd/plugin/status/cmd.go
+++ b/internal/cmd/plugin/status/cmd.go
@@ -28,7 +28,7 @@ import (
// NewCmd create the new "status" subcommand
func NewCmd() *cobra.Command {
statusCmd := &cobra.Command{
- Use: "status [cluster]",
+ Use: "status CLUSTER",
Short: "Get the status of a PostgreSQL cluster",
Args: plugin.RequiresArguments(1),
GroupID: plugin.GroupIDDatabase,
From 2e634fbdc4326a7fae1a8c64a46ac42d333cb249 Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Wed, 4 Dec 2024 13:30:12 +0100
Subject: [PATCH 192/836] docs(plugin): standardize the `CLUSTER` argument in
plugin commands (#6253)
This update ensures consistent usage of the `CLUSTER` argument across
all plugin commands and their associated documentation.
Closes #5848
Signed-off-by: Pierrick
Signed-off-by: Zekiye Aydemir
Signed-off-by: Gabriele Bartolini
Co-authored-by: Zekiye Aydemir
Co-authored-by: Gabriele Bartolini
---
docs/src/kubectl-plugin.md | 102 +++++++++---------
.../plugin/logical/publication/create/cmd.go | 2 +-
.../plugin/logical/publication/drop/cmd.go | 2 +-
.../plugin/logical/subscription/create/cmd.go | 2 +-
.../plugin/logical/subscription/drop/cmd.go | 2 +-
.../logical/subscription/syncsequences/cmd.go | 2 +-
internal/cmd/plugin/logs/cluster.go | 2 +-
internal/cmd/plugin/pgbench/cmd.go | 6 +-
internal/cmd/plugin/pgbench/pgbench.go | 8 +-
internal/cmd/plugin/promote/cmd.go | 2 +-
internal/cmd/plugin/report/cluster.go | 2 +-
internal/cmd/plugin/restart/cmd.go | 2 +-
internal/cmd/plugin/snapshot/cmd.go | 2 +-
13 files changed, 68 insertions(+), 68 deletions(-)
mode change 100755 => 100644 docs/src/kubectl-plugin.md
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
old mode 100755
new mode 100644
index e066bbc082..2b430dd9e0
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -169,7 +169,7 @@ sudo mv kubectl_complete-cnpg /usr/local/bin
Once the plugin is installed and deployed, you can start using it like this:
```sh
-kubectl cnpg
+kubectl cnpg COMMAND [ARGS...]
```
!!! Note
@@ -346,16 +346,16 @@ The command also supports output in `yaml` and `json` format.
### Promote
The meaning of this command is to `promote` a pod in the cluster to primary, so you
-can start with maintenance work or test a switch-over situation in your cluster
+can start with maintenance work or test a switch-over situation in your cluster:
```sh
-kubectl cnpg promote cluster-example cluster-example-2
+kubectl cnpg promote CLUSTER CLUSTER-INSTANCE
```
-Or you can use the instance node number to promote
+Or you can use the instance node number to promote:
```sh
-kubectl cnpg promote cluster-example 2
+kubectl cnpg promote CLUSTER INSTANCE
```
### Certificates
@@ -364,13 +364,13 @@ Clusters created using the CloudNativePG operator work with a CA to sign
a TLS authentication certificate.
To get a certificate, you need to provide a name for the secret to store
-the credentials, the cluster name, and a user for this certificate
+the credentials, the cluster name, and a user for this certificate:
```sh
-kubectl cnpg certificate cluster-cert --cnpg-cluster cluster-example --cnpg-user appuser
+kubectl cnpg certificate cluster-cert --cnpg-cluster CLUSTER --cnpg-user USER
```
-After the secret it's created, you can get it using `kubectl`
+After the secret is created, you can get it using `kubectl`:
```sh
kubectl get secret cluster-cert
@@ -388,7 +388,7 @@ The `kubectl cnpg restart` command can be used in two cases:
- requesting the operator to orchestrate a rollout restart
for a certain cluster. This is useful to apply
- configuration changes to cluster dependent objects, such as ConfigMaps
+ configuration changes to cluster-dependent objects, such as `ConfigMaps`
containing custom monitoring queries.
- request a single instance restart, either in-place if the instance is
@@ -397,10 +397,10 @@ The `kubectl cnpg restart` command can be used in two cases:
```sh
# this command will restart a whole cluster in a rollout fashion
-kubectl cnpg restart [clusterName]
+kubectl cnpg restart CLUSTER
# this command will restart a single instance, according to the policy above
-kubectl cnpg restart [clusterName] [pod]
+kubectl cnpg restart CLUSTER INSTANCE
```
If the in-place restart is requested but the change cannot be applied without
@@ -420,7 +420,7 @@ to cluster dependent objects, such as ConfigMaps containing custom monitoring qu
The following command will reload all configurations for a given cluster:
```sh
-kubectl cnpg reload [cluster_name]
+kubectl cnpg reload CLUSTER
```
### Maintenance
@@ -503,7 +503,7 @@ default time-stamped filename is created for the zip file.
E.g. the default installation namespace is cnpg-system
```sh
-kubectl cnpg report operator -n
+kubectl cnpg report operator -n cnpg-system
```
results in
@@ -515,7 +515,7 @@ Successfully written report to "report_operator_.zip" (format: "yaml"
With the `-f` flag set:
```sh
-kubectl cnpg report operator -n -f reportRedacted.zip
+kubectl cnpg report operator -n cnpg-system -f reportRedacted.zip
```
Unzipping the file will produce a time-stamped top-level folder to keep the
@@ -592,7 +592,7 @@ metadata:
With the `-S` (`--stopRedaction`) option activated, secrets are shown:
```sh
-kubectl cnpg report operator -n -f reportNonRedacted.zip -S
+kubectl cnpg report operator -n cnpg-system -f reportNonRedacted.zip -S
```
You'll get a reminder that you're about to view confidential information:
@@ -641,7 +641,7 @@ so the `-S` is disabled.
Usage:
```sh
-kubectl cnpg report cluster [flags]
+kubectl cnpg report cluster CLUSTER [flags]
```
Note that, unlike the `operator` sub-command, for the `cluster` sub-command you
@@ -649,7 +649,7 @@ need to provide the cluster name, and very likely the namespace, unless the clus
is in the default one.
```sh
-kubectl cnpg report cluster example -f report.zip -n example_namespace
+kubectl cnpg report cluster CLUSTER -f report.zip [-n NAMESPACE]
```
and then:
@@ -671,7 +671,7 @@ Archive: report.zip
Remember that you can use the `--logs` flag to add the pod and job logs to the ZIP.
```sh
-kubectl cnpg report cluster example -n example_namespace --logs
+kubectl cnpg report cluster CLUSTER [-n NAMESPACE] --logs
```
will result in:
@@ -751,20 +751,20 @@ which takes `-f` to mean the logs should be followed.
Usage:
```sh
-kubectl cnpg logs cluster [flags]
+kubectl cnpg logs cluster CLUSTER [flags]
```
Using the `-f` option to follow:
```sh
-kubectl cnpg report cluster cluster-example -f
+kubectl cnpg logs cluster CLUSTER -f
```
Using `--tail` option to display 3 lines from each pod and the `-f` option
to follow:
```sh
-kubectl cnpg report cluster cluster-example -f --tail 3
+kubectl cnpg logs cluster CLUSTER -f --tail 3
```
```output
@@ -777,7 +777,7 @@ kubectl cnpg report cluster cluster-example -f --tail 3
With the `-o` option omitted, and with `--output` specified:
```console
-$ kubectl cnpg logs cluster cluster-example --output my-cluster.log
+$ kubectl cnpg logs cluster CLUSTER --output my-cluster.log
Successfully written logs to "my-cluster.log"
```
@@ -869,7 +869,7 @@ detached PVCs.
Usage:
```sh
-kubectl cnpg destroy [CLUSTER_NAME] [INSTANCE_ID]
+kubectl cnpg destroy CLUSTER INSTANCE
```
The following example removes the `cluster-example-2` pod and the associated
@@ -895,7 +895,7 @@ instance.
You can hibernate a cluster with:
```sh
-kubectl cnpg hibernate on
+kubectl cnpg hibernate on CLUSTER
```
This will:
@@ -918,13 +918,13 @@ In case of error the operator will not be able to revert the procedure. You can
still force the operation with:
```sh
-kubectl cnpg hibernate on cluster-example --force
+kubectl cnpg hibernate on CLUSTER --force
```
A hibernated cluster can be resumed with:
```sh
-kubectl cnpg hibernate off
+kubectl cnpg hibernate off CLUSTER
```
Once the cluster has been hibernated, it's possible to show the last
@@ -932,7 +932,7 @@ configuration and the status that PostgreSQL had after it was shut down.
That can be done with:
```sh
-kubectl cnpg hibernate status
+kubectl cnpg hibernate status CLUSTER
```
### Benchmarking the database with pgbench
@@ -941,7 +941,7 @@ Pgbench can be run against an existing PostgreSQL cluster with following
command:
```sh
-kubectl cnpg pgbench -- --time 30 --client 1 --jobs 1
+kubectl cnpg pgbench CLUSTER -- --time 30 --client 1 --jobs 1
```
Refer to the [Benchmarking pgbench section](benchmarking.md#pgbench) for more
@@ -949,10 +949,10 @@ details.
### Benchmarking the storage with fio
-fio can be run on an existing storage class with following command:
+`fio` can be run on an existing storage class with the following command:
```sh
-kubectl cnpg fio -n
+kubectl cnpg fio FIO_JOB_NAME [-n NAMESPACE]
```
Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details.
@@ -965,13 +965,13 @@ an existing Postgres cluster by creating a new `Backup` resource.
The following example requests an on-demand backup for a given cluster:
```sh
-kubectl cnpg backup [cluster_name]
+kubectl cnpg backup CLUSTER
```
or, if using volume snapshots:
```sh
-kubectl cnpg backup [cluster_name] -m volumeSnapshot
+kubectl cnpg backup CLUSTER -m volumeSnapshot
```
The created backup will be named after the request time:
@@ -995,7 +995,7 @@ the configuration settings.
### Launching psql
-The `kubectl cnpg psql` command starts a new PostgreSQL interactive front-end
+The `kubectl cnpg psql CLUSTER` command starts a new PostgreSQL interactive front-end
process (psql) connected to an existing Postgres cluster, as if you were running
it from the actual pod. This means that you will be using the `postgres` user.
@@ -1136,20 +1136,20 @@ command. The basic structure of this command is as follows:
```sh
kubectl cnpg publication create \
- --publication \
- [--external-cluster ]
- [options]
+ --publication PUBLICATION_NAME \
+ [--external-cluster EXTERNAL_CLUSTER]
+ LOCAL_CLUSTER [options]
```
There are two primary use cases:
- With `--external-cluster`: Use this option to create a publication on an
external cluster (i.e. defined in the `externalClusters` stanza). The commands
- will be issued from the ``, but the publication will be for the
- data in ``.
+ will be issued from the `LOCAL_CLUSTER`, but the publication will be for the
+ data in `EXTERNAL_CLUSTER`.
- Without `--external-cluster`: Use this option to create a publication in the
- `` PostgreSQL `Cluster` (by default, the `app` database).
+ `LOCAL_CLUSTER` PostgreSQL `Cluster` (by default, the `app` database).
!!! Warning
When connecting to an external cluster, ensure that the specified user has
@@ -1215,9 +1215,9 @@ following command structure:
```sh
kubectl cnpg publication drop \
- --publication \
- [--external-cluster ]
- [options]
+ --publication PUBLICATION_NAME \
+ [--external-cluster EXTERNAL_CLUSTER]
+ LOCAL_CLUSTER [options]
```
To access further details and precise instructions, use the following command:
@@ -1253,15 +1253,15 @@ command. The basic structure of this command is as follows:
```sh
kubectl cnpg subscription create \
- --subscription \
- --publication \
- --external-cluster \
- [options]
+ --subscription SUBSCRIPTION_NAME \
+ --publication PUBLICATION_NAME \
+ --external-cluster EXTERNAL_CLUSTER \
+ LOCAL_CLUSTER [options]
```
This command configures a subscription directed towards the specified
publication in the designated external cluster, as defined in the
-`externalClusters` stanza of the ``.
+`externalClusters` stanza of the `LOCAL_CLUSTER`.
For additional information and detailed instructions, type the following
command:
@@ -1303,8 +1303,8 @@ You can drop a `SUBSCRIPTION` with the following command structure:
```sh
kubectl cnpg subscription drop \
- --subscription \
- [options]
+ --subscription SUBSCRIPTION_NAME \
+ LOCAL_CLUSTER [options]
```
To access further details and precise instructions, use the following command:
@@ -1332,8 +1332,8 @@ You can use the command as shown below:
```sh
kubectl cnpg subscription sync-sequences \
- --subscription \
-
+ --subscription SUBSCRIPTION_NAME \
+ LOCAL_CLUSTER
```
For comprehensive details and specific instructions, utilize the following
diff --git a/internal/cmd/plugin/logical/publication/create/cmd.go b/internal/cmd/plugin/logical/publication/create/cmd.go
index 966500911b..3e7847e682 100644
--- a/internal/cmd/plugin/logical/publication/create/cmd.go
+++ b/internal/cmd/plugin/logical/publication/create/cmd.go
@@ -38,7 +38,7 @@ func NewCmd() *cobra.Command {
var dryRun bool
publicationCreateCmd := &cobra.Command{
- Use: "create cluster_name",
+ Use: "create CLUSTER",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
diff --git a/internal/cmd/plugin/logical/publication/drop/cmd.go b/internal/cmd/plugin/logical/publication/drop/cmd.go
index 6d27c7a955..b7d1166c5c 100644
--- a/internal/cmd/plugin/logical/publication/drop/cmd.go
+++ b/internal/cmd/plugin/logical/publication/drop/cmd.go
@@ -35,7 +35,7 @@ func NewCmd() *cobra.Command {
var dryRun bool
publicationDropCmd := &cobra.Command{
- Use: "drop cluster_name",
+ Use: "drop CLUSTER",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
diff --git a/internal/cmd/plugin/logical/subscription/create/cmd.go b/internal/cmd/plugin/logical/subscription/create/cmd.go
index 9ca7508f9e..9c234d8ddc 100644
--- a/internal/cmd/plugin/logical/subscription/create/cmd.go
+++ b/internal/cmd/plugin/logical/subscription/create/cmd.go
@@ -37,7 +37,7 @@ func NewCmd() *cobra.Command {
var dryRun bool
subscriptionCreateCmd := &cobra.Command{
- Use: "create cluster_name",
+ Use: "create CLUSTER",
Short: "create a logical replication subscription",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
diff --git a/internal/cmd/plugin/logical/subscription/drop/cmd.go b/internal/cmd/plugin/logical/subscription/drop/cmd.go
index bba02c68bc..1ec557ad5f 100644
--- a/internal/cmd/plugin/logical/subscription/drop/cmd.go
+++ b/internal/cmd/plugin/logical/subscription/drop/cmd.go
@@ -34,7 +34,7 @@ func NewCmd() *cobra.Command {
var dryRun bool
subscriptionDropCmd := &cobra.Command{
- Use: "drop cluster_name",
+ Use: "drop CLUSTER",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go
index fbb6230794..c88f62d0ee 100644
--- a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go
+++ b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go
@@ -36,7 +36,7 @@ func NewCmd() *cobra.Command {
var offset int
syncSequencesCmd := &cobra.Command{
- Use: "sync-sequences cluster_name",
+ Use: "sync-sequences CLUSTER",
Short: "synchronize the sequences from the source database",
Args: plugin.RequiresArguments(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
diff --git a/internal/cmd/plugin/logs/cluster.go b/internal/cmd/plugin/logs/cluster.go
index 0249ab9357..3ba9e7d4b2 100644
--- a/internal/cmd/plugin/logs/cluster.go
+++ b/internal/cmd/plugin/logs/cluster.go
@@ -26,7 +26,7 @@ func clusterCmd() *cobra.Command {
cl := clusterLogs{}
cmd := &cobra.Command{
- Use: "cluster ",
+ Use: "cluster CLUSTER",
Short: "Logs for cluster's pods",
Long: "Collects the logs for all pods in a cluster into a single stream or outputFile",
Args: plugin.RequiresArguments(1),
diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go
index 394b238546..11260d37d1 100644
--- a/internal/cmd/plugin/pgbench/cmd.go
+++ b/internal/cmd/plugin/pgbench/cmd.go
@@ -47,14 +47,14 @@ func NewCmd() *cobra.Command {
&run.jobName,
"job-name",
"",
- "Name of the job, defaulting to: -pgbench-xxxx",
+ "Name of the job, defaulting to: CLUSTER-pgbench-xxxx",
)
pgBenchCmd.Flags().StringVar(
&run.jobName,
"pgbench-job-name",
"",
- "Name of the job, defaulting to: -pgbench-xxxx",
+ "Name of the job, defaulting to: CLUSTER-pgbench-xxxx",
)
pgBenchCmd.Flags().StringVar(
@@ -88,7 +88,7 @@ func validateCommandArgs(cmd *cobra.Command, args []string) error {
}
if cmd.ArgsLenAtDash() > 1 {
- return fmt.Errorf("pgBenchCommands should be passed after the -- delimiter")
+ return fmt.Errorf("PGBENCH_COMMAND_ARGS should be passed after the -- delimiter")
}
return nil
diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go
index a1b92eec4f..0887374219 100644
--- a/internal/cmd/plugin/pgbench/pgbench.go
+++ b/internal/cmd/plugin/pgbench/pgbench.go
@@ -47,17 +47,17 @@ const (
)
var jobExample = `
- # Dry-run command with default values and clusterName "cluster-example"
+ # Dry-run command with default values and CLUSTER "cluster-example"
kubectl-cnpg pgbench cluster-example --dry-run
- # Create a pgbench job with default values and clusterName "cluster-example"
+ # Create a pgbench job with default values and CLUSTER "cluster-example"
kubectl-cnpg pgbench cluster-example
- # Dry-run command with given values and clusterName "cluster-example"
+ # Dry-run command with given values and CLUSTER "cluster-example"
kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name --dry-run -- \
--time 30 --client 1 --jobs 1
- # Create a job with given values and clusterName "cluster-example"
+ # Create a job with given values and CLUSTER "cluster-example"
kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name -- \
--time 30 --client 1 --jobs 1`
diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go
index 401e75d949..111c4291b0 100644
--- a/internal/cmd/plugin/promote/cmd.go
+++ b/internal/cmd/plugin/promote/cmd.go
@@ -30,7 +30,7 @@ import (
func NewCmd() *cobra.Command {
promoteCmd := &cobra.Command{
Use: "promote CLUSTER INSTANCE",
- Short: "Promote the pod named CLUSTER-INSTANCE to primary",
+ Short: "Promote the instance named CLUSTER-INSTANCE to primary",
GroupID: plugin.GroupIDCluster,
Args: plugin.RequiresArguments(2),
RunE: func(_ *cobra.Command, args []string) error {
diff --git a/internal/cmd/plugin/report/cluster.go b/internal/cmd/plugin/report/cluster.go
index 5e5920db37..fa76af6461 100644
--- a/internal/cmd/plugin/report/cluster.go
+++ b/internal/cmd/plugin/report/cluster.go
@@ -32,7 +32,7 @@ func clusterCmd() *cobra.Command {
const filePlaceholder = "report_cluster__.zip"
cmd := &cobra.Command{
- Use: "cluster ",
+ Use: "cluster CLUSTER",
Short: "Report cluster resources, pods, events, logs (opt-in)",
Long: "Collects combined information on the cluster in a Zip file",
Args: plugin.RequiresArguments(1),
diff --git a/internal/cmd/plugin/restart/cmd.go b/internal/cmd/plugin/restart/cmd.go
index c27b66a989..28ef5e31df 100644
--- a/internal/cmd/plugin/restart/cmd.go
+++ b/internal/cmd/plugin/restart/cmd.go
@@ -28,7 +28,7 @@ import (
// NewCmd creates the new "reset" command
func NewCmd() *cobra.Command {
restartCmd := &cobra.Command{
- Use: "restart clusterName [instance]",
+ Use: "restart CLUSTER [INSTANCE]",
Short: `Restart a cluster or a single instance in a cluster`,
Long: `If only the cluster name is specified, the whole cluster will be restarted,
rolling out new configurations if present.
diff --git a/internal/cmd/plugin/snapshot/cmd.go b/internal/cmd/plugin/snapshot/cmd.go
index ed20dc669f..17039dcc7d 100644
--- a/internal/cmd/plugin/snapshot/cmd.go
+++ b/internal/cmd/plugin/snapshot/cmd.go
@@ -28,7 +28,7 @@ import (
// NewCmd implements the `snapshot` subcommand
func NewCmd() *cobra.Command {
cmd := &cobra.Command{
- Use: "snapshot ",
+ Use: "snapshot CLUSTER",
Short: "DEPRECATED (use `backup -m volumeSnapshot` instead)",
Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`",
GroupID: plugin.GroupIDDatabase,
From 67739cc70331a8572c9af8691ab285e86919c717 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Wed, 4 Dec 2024 14:33:35 +0100
Subject: [PATCH 193/836] feat(plugin): report CNPG-I plugins in `status`
command (#6232)
Closes #6230
Signed-off-by: Armando Ruocco
Signed-off-by: Gabriele Bartolini
Co-authored-by: Gabriele Bartolini
---
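Note: a sketch of the new section appended to the `kubectl cnpg status`
output. The column headers and the `N/A` fallback come from the code below;
the plugin name, version, and capabilities shown are hypothetical:

```output
Plugins status
Name                            Version  Status  Reported Operator Capabilities
----                            -------  ------  ------------------------------
barman-cloud.cloudnative-pg.io  0.5.0    N/A     Backup Service, WAL Service
```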
internal/cmd/plugin/status/status.go | 63 +++++++++++++++++++++++++++-
1 file changed, 62 insertions(+), 1 deletion(-)
diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go
index 8b05a67d5c..c55035a8db 100644
--- a/internal/cmd/plugin/status/status.go
+++ b/internal/cmd/plugin/status/status.go
@@ -27,8 +27,9 @@ import (
"time"
"github.com/cheynewallace/tabby"
+ "github.com/cloudnative-pg/cnpg-i/pkg/identity"
"github.com/cloudnative-pg/machinery/pkg/stringset"
- types "github.com/cloudnative-pg/machinery/pkg/types"
+ "github.com/cloudnative-pg/machinery/pkg/types"
"github.com/logrusorgru/aurora/v4"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
@@ -143,6 +144,7 @@ func Status(
status.printPodDisruptionBudgetStatus()
}
status.printInstancesStatus()
+ status.printPluginStatus(verbosity)
if len(errs) > 0 {
fmt.Println()
@@ -751,6 +753,7 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() {
continue
}
status.Print()
+ fmt.Println()
}
func (fullStatus *PostgresqlStatus) printCertificatesStatus() {
@@ -1147,6 +1150,64 @@ func (fullStatus *PostgresqlStatus) printTablespacesStatus() {
fmt.Println()
}
+func (fullStatus *PostgresqlStatus) printPluginStatus(verbosity int) {
+ const header = "Plugins status"
+
+ parseCapabilities := func(capabilities []string) string {
+ if len(capabilities) == 0 {
+ return "N/A"
+ }
+
+ result := make([]string, len(capabilities))
+ for idx, capability := range capabilities {
+ switch capability {
+ case identity.PluginCapability_Service_TYPE_BACKUP_SERVICE.String():
+ result[idx] = "Backup Service"
+ case identity.PluginCapability_Service_TYPE_RESTORE_JOB.String():
+ result[idx] = "Restore Job"
+ case identity.PluginCapability_Service_TYPE_RECONCILER_HOOKS.String():
+ result[idx] = "Reconciler Hooks"
+ case identity.PluginCapability_Service_TYPE_WAL_SERVICE.String():
+ result[idx] = "WAL Service"
+ case identity.PluginCapability_Service_TYPE_OPERATOR_SERVICE.String():
+ result[idx] = "Operator Service"
+ case identity.PluginCapability_Service_TYPE_LIFECYCLE_SERVICE.String():
+ result[idx] = "Lifecycle Service"
+ case identity.PluginCapability_Service_TYPE_UNSPECIFIED.String():
+ continue
+ default:
+ result[idx] = capability
+ }
+ }
+
+ return strings.Join(result, ", ")
+ }
+
+ if len(fullStatus.Cluster.Status.PluginStatus) == 0 {
+ if verbosity > 0 {
+ fmt.Println(aurora.Green(header))
+ fmt.Println("No plugins found")
+ }
+ return
+ }
+
+ fmt.Println(aurora.Green(header))
+
+ status := tabby.New()
+ status.AddHeader("Name", "Version", "Status", "Reported Operator Capabilities")
+
+ for _, plg := range fullStatus.Cluster.Status.PluginStatus {
+ plgStatus := "N/A"
+ if plg.Status != "" {
+ plgStatus = plg.Status
+ }
+ status.AddLine(plg.Name, plg.Version, plgStatus, parseCapabilities(plg.Capabilities))
+ }
+
+ status.Print()
+ fmt.Println()
+}
+
func getPrimaryStartTime(cluster *apiv1.Cluster) string {
return getPrimaryStartTimeIdempotent(cluster, time.Now())
}
From fa365a1162d74a7b546c7710633e9701b0199a98 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Wed, 4 Dec 2024 15:42:14 +0100
Subject: [PATCH 194/836] chore(database): align field names with `initdb`
section (#6245)
Closes #6244
Signed-off-by: Marco Nenciarini
Signed-off-by: Jaime Silvela
Co-authored-by: Jaime Silvela
---
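Note: a minimal sketch of a `Database` manifest using the renamed camelCase
fields (object and cluster names are illustrative; the inline comments map
each field to its former snake_case name):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: database-icu-sample
spec:
  name: declarative-icu
  owner: app
  encoding: UTF8
  localeProvider: icu   # formerly locale_provider; required by the two fields below
  icuLocale: en         # formerly icu_locale
  icuRules: fr          # formerly icu_rules
  template: template0
  cluster:
    name: cluster-example
```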
.wordlist-en-custom.txt | 2 +-
api/v1/database_types.go | 45 ++++++++----
.../bases/postgresql.cnpg.io_databases.yaml | 69 +++++++++++++------
docs/src/cloudnative-pg.v1.md | 36 ++++++----
docs/src/samples/database-example-icu.yaml | 6 +-
...e-with-delete-reclaim-policy.yaml.template | 4 +-
.../database.yaml.template | 4 +-
7 files changed, 109 insertions(+), 57 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 8cf809cd19..cea36722e8 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1,4 +1,3 @@
-
AES
API's
APIs
@@ -645,6 +644,7 @@ cn
cnp
cnpg
codeready
+collationVersion
columnValue
commandError
commandOutput
diff --git a/api/v1/database_types.go b/api/v1/database_types.go
index 12786a38f6..5e6ecd834a 100644
--- a/api/v1/database_types.go
+++ b/api/v1/database_types.go
@@ -36,6 +36,9 @@ const (
)
// DatabaseSpec is the specification of a Postgresql Database
+// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`"
+// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`"
+// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`"
type DatabaseSpec struct {
// The corresponding cluster
ClusterRef corev1.LocalObjectReference `json:"cluster"`
@@ -67,44 +70,56 @@ type DatabaseSpec struct {
Encoding string `json:"encoding,omitempty"`
// The locale (cannot be changed)
+ // Sets the default collation order and character classification in the new database.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable"
// +optional
Locale string `json:"locale,omitempty"`
- // The locale provider (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale_provider is immutable"
+ // The LOCALE_PROVIDER (cannot be changed)
+ // This option sets the locale provider for databases created in the new cluster.
+ // Available from PostgreSQL 16.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeProvider is immutable"
// +optional
- LocaleProvider string `json:"locale_provider,omitempty"`
+ LocaleProvider string `json:"localeProvider,omitempty"`
// The LC_COLLATE (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="lc_collate is immutable"
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCollate is immutable"
// +optional
- LcCollate string `json:"lc_collate,omitempty"`
+ LcCollate string `json:"localeCollate,omitempty"`
// The LC_CTYPE (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="lc_ctype is immutable"
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCType is immutable"
// +optional
- LcCtype string `json:"lc_ctype,omitempty"`
+ LcCtype string `json:"localeCType,omitempty"`
// The ICU_LOCALE (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icu_locale is immutable"
+ // Specifies the ICU locale when the ICU provider is used.
+ // This option requires `localeProvider` to be set to `icu`.
+ // Available from PostgreSQL 15.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuLocale is immutable"
// +optional
- IcuLocale string `json:"icu_locale,omitempty"`
+ IcuLocale string `json:"icuLocale,omitempty"`
// The ICU_RULES (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icu_rules is immutable"
+ // Specifies additional collation rules to customize the behavior of the default collation.
+ // This option requires `localeProvider` to be set to `icu`.
+ // Available from PostgreSQL 16.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuRules is immutable"
// +optional
- IcuRules string `json:"icu_rules,omitempty"`
+ IcuRules string `json:"icuRules,omitempty"`
// The BUILTIN_LOCALE (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtin_locale is immutable"
+ // Specifies the locale name when the builtin provider is used.
+ // This option requires `localeProvider` to be set to `builtin`.
+ // Available from PostgreSQL 17.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtinLocale is immutable"
// +optional
- BuiltinLocale string `json:"builtin_locale,omitempty"`
+ BuiltinLocale string `json:"builtinLocale,omitempty"`
// The COLLATION_VERSION (cannot be changed)
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collation_version is immutable"
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collationVersion is immutable"
// +optional
- CollationVersion string `json:"collation_version,omitempty"`
+ CollationVersion string `json:"collationVersion,omitempty"`
// True when the database is a template
// +optional
diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml
index d50fb58224..7a1d7c8066 100644
--- a/config/crd/bases/postgresql.cnpg.io_databases.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml
@@ -61,11 +61,15 @@ spec:
allowConnections:
description: True when connections to this database are allowed
type: boolean
- builtin_locale:
- description: The BUILTIN_LOCALE (cannot be changed)
+ builtinLocale:
+ description: |-
+ The BUILTIN_LOCALE (cannot be changed)
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
type: string
x-kubernetes-validations:
- - message: builtin_locale is immutable
+ - message: builtinLocale is immutable
rule: self == oldSelf
cluster:
description: The corresponding cluster
@@ -81,11 +85,11 @@ spec:
type: string
type: object
x-kubernetes-map-type: atomic
- collation_version:
+ collationVersion:
description: The COLLATION_VERSION (cannot be changed)
type: string
x-kubernetes-validations:
- - message: collation_version is immutable
+ - message: collationVersion is immutable
rule: self == oldSelf
connectionLimit:
description: |-
@@ -113,44 +117,57 @@ spec:
- present
- absent
type: string
- icu_locale:
- description: The ICU_LOCALE (cannot be changed)
+ icuLocale:
+ description: |-
+ The ICU_LOCALE (cannot be changed)
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
type: string
x-kubernetes-validations:
- - message: icu_locale is immutable
+ - message: icuLocale is immutable
rule: self == oldSelf
- icu_rules:
- description: The ICU_RULES (cannot be changed)
+ icuRules:
+ description: |-
+ The ICU_RULES (cannot be changed)
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
type: string
x-kubernetes-validations:
- - message: icu_rules is immutable
+ - message: icuRules is immutable
rule: self == oldSelf
isTemplate:
description: True when the database is a template
type: boolean
- lc_collate:
- description: The LC_COLLATE (cannot be changed)
+ locale:
+ description: |-
+ The locale (cannot be changed)
+ Sets the default collation order and character classification in the new database.
type: string
x-kubernetes-validations:
- - message: lc_collate is immutable
+ - message: locale is immutable
rule: self == oldSelf
- lc_ctype:
+ localeCType:
description: The LC_CTYPE (cannot be changed)
type: string
x-kubernetes-validations:
- - message: lc_ctype is immutable
+ - message: localeCType is immutable
rule: self == oldSelf
- locale:
- description: The locale (cannot be changed)
+ localeCollate:
+ description: The LC_COLLATE (cannot be changed)
type: string
x-kubernetes-validations:
- - message: locale is immutable
+ - message: localeCollate is immutable
rule: self == oldSelf
- locale_provider:
- description: The locale provider (cannot be changed)
+ localeProvider:
+ description: |-
+ The LOCALE_PROVIDER (cannot be changed)
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
type: string
x-kubernetes-validations:
- - message: locale_provider is immutable
+ - message: localeProvider is immutable
rule: self == oldSelf
name:
description: The name inside PostgreSQL
@@ -182,6 +199,14 @@ spec:
- name
- owner
type: object
+ x-kubernetes-validations:
+ - message: builtinLocale is only available when localeProvider is set
+ to `builtin`
+ rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin'''
+ - message: icuLocale is only available when localeProvider is set to `icu`
+ rule: '!has(self.icuLocale) || self.localeProvider == ''icu'''
+ - message: icuRules is only available when localeProvider is set to `icu`
+ rule: '!has(self.icuRules) || self.localeProvider == ''icu'''
status:
description: |-
Most recently observed status of the Database. This data may not be up to
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index ea07a0bed8..4854ff8faf 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -2466,52 +2466,64 @@ PostgreSQL cluster from an existing storage
string
- The locale (cannot be changed)
+ The locale (cannot be changed)
+Sets the default collation order and character classification in the new database.
-locale_provider
+localeProvider
string
- The locale provider (cannot be changed)
+ The LOCALE_PROVIDER (cannot be changed)
+This option sets the locale provider for databases created in the new cluster.
+Available from PostgreSQL 16.
-lc_collate
+localeCollate
string
The LC_COLLATE (cannot be changed)
-lc_ctype
+localeCType
string
The LC_CTYPE (cannot be changed)
-icu_locale
+icuLocale
string
- The ICU_LOCALE (cannot be changed)
+ The ICU_LOCALE (cannot be changed)
+Specifies the ICU locale when the ICU provider is used.
+This option requires localeProvider to be set to icu.
+Available from PostgreSQL 15.
-icu_rules
+icuRules
string
- The ICU_RULES (cannot be changed)
+ The ICU_RULES (cannot be changed)
+Specifies additional collation rules to customize the behavior of the default collation.
+This option requires localeProvider to be set to icu.
+Available from PostgreSQL 16.
-builtin_locale
+builtinLocale
string
- The BUILTIN_LOCALE (cannot be changed)
+ The BUILTIN_LOCALE (cannot be changed)
+Specifies the locale name when the builtin provider is used.
+This option requires localeProvider to be set to builtin.
+Available from PostgreSQL 17.
-collation_version
+collationVersion
string
diff --git a/docs/src/samples/database-example-icu.yaml b/docs/src/samples/database-example-icu.yaml
index 7a6bba7e4d..fdfd367921 100644
--- a/docs/src/samples/database-example-icu.yaml
+++ b/docs/src/samples/database-example-icu.yaml
@@ -8,9 +8,9 @@ spec:
name: declarative-icu
owner: app
encoding: UTF8
- locale_provider: icu
- icu_locale: en
- icu_rules: fr
+ localeProvider: icu
+ icuLocale: en
+ icuRules: fr
template: template0
cluster:
name: cluster-example
diff --git a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template
index 0ce2071609..be0f6c7e23 100644
--- a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template
+++ b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template
@@ -5,8 +5,8 @@ metadata:
spec:
name: declarative
owner: app
- lc_ctype: C
- lc_collate: C
+ localeCType: C
+ localeCollate: C
encoding: UTF8
databaseReclaimPolicy: delete
cluster:
diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template
index 75f2107bcc..a3ae25d8b3 100644
--- a/tests/e2e/fixtures/declarative_databases/database.yaml.template
+++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template
@@ -5,8 +5,8 @@ metadata:
spec:
name: declarative
owner: app
- lc_ctype: "en_US.utf8"
- lc_collate: C
+ localeCType: "en_US.utf8"
+ localeCollate: C
encoding: SQL_ASCII
template: template0
cluster:
From 45a147bafedf3baa61d059dcf9780c5eadc6e03c Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Wed, 4 Dec 2024 18:43:55 +0100
Subject: [PATCH 195/836] fix: correct path for partial wal archiver (#6255)
Use a relative file name for the partial WAL file archived during a
replica switchover. This fixes an issue with the backup plugin, which
received the absolute file path prefixed by the $PGDATA value. The
in-tree archiver uses only the base name of the file.
Closes #6256
Signed-off-by: Francesco Canovai
---
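Note: a self-contained sketch of the path handling introduced here, with
hypothetical PGDATA and WAL segment values. The point is that the archive
command now receives a name relative to PGDATA instead of an absolute path:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical values, for illustration only
	pgData := "/var/lib/postgresql/data/pgdata"
	walFile := "000000010000000000000002"

	// Relative to PGDATA: this is what wal-archive (and plugins) now receive
	walRelativePath := path.Join("pg_wal", walFile)
	partialWalFileRelativePath := fmt.Sprintf("%s.partial", walRelativePath)

	// Absolute path, used only for the local os.Link call
	walFileAbsolutePath := path.Join(pgData, walRelativePath)

	fmt.Println(partialWalFileRelativePath) // pg_wal/000000010000000000000002.partial
	fmt.Println(walFileAbsolutePath)        // /var/lib/postgresql/data/pgdata/pg_wal/000000010000000000000002
}
```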
pkg/management/postgres/webserver/remote.go | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go
index 7b9d75becd..95eab40892 100644
--- a/pkg/management/postgres/webserver/remote.go
+++ b/pkg/management/postgres/webserver/remote.go
@@ -386,24 +386,27 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req
return
}
- pgWalDirectory := path.Join(os.Getenv("PGDATA"), "pg_wal")
- walFilPath := path.Join(pgWalDirectory, walFile)
- partialWalFilePath := fmt.Sprintf("%s.partial", walFilPath)
+ pgData := os.Getenv("PGDATA")
+ walRelativePath := path.Join("pg_wal", walFile)
+ partialWalFileRelativePath := fmt.Sprintf("%s.partial", walRelativePath)
+ walFileAbsolutePath := path.Join(pgData, walRelativePath)
+ partialWalFileAbsolutePath := path.Join(pgData, partialWalFileRelativePath)
- if err := os.Link(walFilPath, partialWalFilePath); err != nil {
+ if err := os.Link(walFileAbsolutePath, partialWalFileAbsolutePath); err != nil {
log.Error(err, "failed to get pg_controldata")
sendBadRequestJSONResponse(w, "ERROR_WHILE_CREATING_SYMLINK", err.Error())
return
}
defer func() {
- if err := fileutils.RemoveFile(partialWalFilePath); err != nil {
+ if err := fileutils.RemoveFile(partialWalFileAbsolutePath); err != nil {
log.Error(err, "while deleting the partial wal file symlink")
}
}()
- options := []string{constants.WalArchiveCommand, partialWalFilePath}
+ options := []string{constants.WalArchiveCommand, partialWalFileRelativePath}
walArchiveCmd := exec.Command("/controller/manager", options...) // nolint: gosec
+ walArchiveCmd.Dir = pgData
if err := execlog.RunBuffering(walArchiveCmd, "wal-archive-partial"); err != nil {
sendBadRequestJSONResponse(w, "ERROR_WHILE_EXECUTING_WAL_ARCHIVE", err.Error())
return
From f95015ab8ddd3d78c3bf5a08e164c499fbafc02d Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Wed, 4 Dec 2024 22:50:12 +0100
Subject: [PATCH 196/836] feat: clean up logging of database, publication,
subscription controllers (#6268)
This update enhances the logging mechanisms for the database, publication,
and subscription controllers. It ensures more consistent and informative
log messages, making debugging and monitoring easier.
Closes #5524
Signed-off-by: Jaime Silvela
Signed-off-by: wolfox
Signed-off-by: Armando Ruocco
Co-authored-by: wolfox
Co-authored-by: Armando Ruocco
---
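Note: with the named logger and the resource name attached as a key/value
pair, a reconciliation entry would look roughly like the following (the exact
field layout depends on the configured log encoder, and "app-db" is a
hypothetical resource name):

```output
{"level":"info","logger":"database_reconciler","msg":"Reconciling database","databaseName":"app-db"}
```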
.../management/controller/database_controller.go | 16 ++++++++++------
.../controller/publication_controller.go | 16 ++++++++++------
.../controller/subscription_controller.go | 16 ++++++++++------
3 files changed, 30 insertions(+), 18 deletions(-)
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index a82f8ce5a1..f1283f2d9c 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -66,12 +66,9 @@ const databaseReconciliationInterval = 30 * time.Second
// Reconcile is the database reconciliation loop
func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- contextLogger := log.FromContext(ctx)
-
- contextLogger.Debug("Reconciliation loop start")
- defer func() {
- contextLogger.Debug("Reconciliation loop end")
- }()
+ contextLogger := log.FromContext(ctx).
+ WithName("database_reconciler").
+ WithValues("databaseName", req.Name)
// Get the database object
var database apiv1.Database
@@ -115,6 +112,11 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
+ contextLogger.Info("Reconciling database")
+ defer func() {
+ contextLogger.Info("Reconciliation loop of database exited")
+ }()
+
// Still not for me, we're waiting for a switchover
if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary {
return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
@@ -165,6 +167,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
ctx,
&database,
); err != nil {
+ contextLogger.Error(err, "while reconciling database")
return r.failedReconciliation(
ctx,
&database,
@@ -172,6 +175,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
)
}
+ contextLogger.Info("Reconciliation of database completed")
return r.succeededReconciliation(
ctx,
&database,
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
index f9d1bc8bd9..9758ff690d 100644
--- a/internal/management/controller/publication_controller.go
+++ b/internal/management/controller/publication_controller.go
@@ -58,12 +58,9 @@ const publicationReconciliationInterval = 30 * time.Second
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile
func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- contextLogger := log.FromContext(ctx)
-
- contextLogger.Debug("Reconciliation loop start")
- defer func() {
- contextLogger.Debug("Reconciliation loop end")
- }()
+ contextLogger := log.FromContext(ctx).
+ WithName("publication_reconciler").
+ WithValues("publicationName", req.Name)
// Get the publication object
var publication apiv1.Publication
@@ -105,6 +102,11 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
}
+ contextLogger.Info("Reconciling publication")
+ defer func() {
+ contextLogger.Info("Reconciliation loop of publication exited")
+ }()
+
// Cannot do anything on a replica cluster
if cluster.IsReplica() {
if err := markAsUnknown(ctx, r.Client, &publication, errClusterIsReplica); err != nil {
@@ -121,12 +123,14 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
if err := r.alignPublication(ctx, &publication); err != nil {
+ contextLogger.Error(err, "while reconciling publication")
if err := markAsFailed(ctx, r.Client, &publication, err); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
}
+ contextLogger.Info("Reconciliation of publication completed")
if err := markAsReady(ctx, r.Client, &publication); err != nil {
return ctrl.Result{}, err
}
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go
index f1a3af65bf..8019c3dd2b 100644
--- a/internal/management/controller/subscription_controller.go
+++ b/internal/management/controller/subscription_controller.go
@@ -51,12 +51,9 @@ const subscriptionReconciliationInterval = 30 * time.Second
// Reconcile is the subscription reconciliation loop
func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- contextLogger := log.FromContext(ctx)
-
- contextLogger.Debug("Reconciliation loop start")
- defer func() {
- contextLogger.Debug("Reconciliation loop end")
- }()
+ contextLogger := log.FromContext(ctx).
+ WithName("subscription_reconciler").
+ WithValues("subscriptionName", req.Name)
// Get the subscription object
var subscription apiv1.Subscription
@@ -98,6 +95,11 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
}
+ contextLogger.Info("Reconciling subscription")
+ defer func() {
+ contextLogger.Info("Reconciliation loop of subscription exited")
+ }()
+
// Cannot do anything on a replica cluster
if cluster.IsReplica() {
if err := markAsUnknown(ctx, r.Client, &subscription, errClusterIsReplica); err != nil {
@@ -127,12 +129,14 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request
}
if err := r.alignSubscription(ctx, &subscription, connString); err != nil {
+ contextLogger.Error(err, "while reconciling subscription")
if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
}
+ contextLogger.Info("Reconciliation of subscription completed")
if err := markAsReady(ctx, r.Client, &subscription); err != nil {
return ctrl.Result{}, err
}
From 4ce42ef29d278e28efd391dd579cc240f4a5b185 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 4 Dec 2024 23:54:36 +0100
Subject: [PATCH 197/836] feat(probes): enable customization of startup,
liveness, and readiness probes (#6266)
This patch enables users to customize the default behavior of readiness,
liveness, and startup probes implemented by CloudNativePG by introducing
the `.spec.probes` section.
Users are responsible for ensuring that any custom probe settings align
with the operational requirements of the cluster to avoid unintended
disruptions.
Closes: #4852
Signed-off-by: Leonardo Cecchi
Signed-off-by: Gabriele Bartolini
Signed-off-by: Ben Healey
Co-authored-by: Gabriele Bartolini
Co-authored-by: Ben Healey
---
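Note: a minimal sketch of the new stanza (instance count, storage size, and
all probe values are illustrative; unset fields keep the Kubernetes defaults
documented in the API comments):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  probes:
    startup:
      failureThreshold: 30
      periodSeconds: 10
    liveness:
      timeoutSeconds: 5
    readiness:
      initialDelaySeconds: 5
```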
.wordlist-en-custom.txt | 8 +
api/v1/cluster_funcs.go | 15 ++
api/v1/cluster_funcs_test.go | 44 ++++
api/v1/cluster_types.go | 56 ++++++
api/v1/zz_generated.deepcopy.go | 55 +++++
.../bases/postgresql.cnpg.io_clusters.yaml | 153 ++++++++++++++
docs/src/cloudnative-pg.v1.md | 122 +++++++++++
docs/src/failure_modes.md | 30 ++-
docs/src/instance_manager.md | 153 ++++++++++++--
docs/src/operator_capability_levels.md | 30 +--
pkg/specs/pods.go | 27 ++-
pkg/specs/podspec_diff.go | 3 +
pkg/specs/podspec_diff_test.go | 53 +++++
tests/e2e/probes_test.go | 190 ++++++++++++++++++
14 files changed, 888 insertions(+), 51 deletions(-)
create mode 100644 tests/e2e/probes_test.go
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index cea36722e8..f5fe08815f 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -342,6 +342,8 @@ PrimaryUpdateMethod
PrimaryUpdateStrategy
PriorityClass
PriorityClassName
+ProbeTerminationGracePeriod
+ProbesConfiguration
ProjectedVolumeSource
PublicationReclaimPolicy
PublicationSpec
@@ -778,6 +780,7 @@ facto
failover
failoverDelay
failovers
+failureThreshold
faq
fastpath
fb
@@ -854,6 +857,7 @@ inheritedMetadata
init
initDB
initdb
+initialDelaySeconds
initialise
initializingPVC
inplace
@@ -1028,6 +1032,7 @@ passwordSecret
passwordStatus
pc
pdf
+periodSeconds
persistentvolumeclaim
persistentvolumeclaims
pgAdmin
@@ -1260,6 +1265,7 @@ subdirectory
subresource
subscriptionReclaimPolicy
substatement
+successThreshold
successfullyExtracted
sudo
superuserSecret
@@ -1302,11 +1308,13 @@ tbody
tcp
td
temporaryData
+terminationGracePeriodSeconds
th
thead
timeLineID
timeframes
timelineID
+timeoutSeconds
tls
tmp
tmpfs
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index 093cb41837..3ffba1a3da 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -1444,3 +1444,18 @@ func (target *RecoveryTarget) BuildPostgresOptions() string {
return result
}
+
+// ApplyInto applies the content of the probe configuration in a Kubernetes
+// probe
+func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) {
+ if p == nil {
+ return
+ }
+
+ k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds
+ k8sProbe.TimeoutSeconds = p.TimeoutSeconds
+ k8sProbe.PeriodSeconds = p.PeriodSeconds
+ k8sProbe.SuccessThreshold = p.SuccessThreshold
+ k8sProbe.FailureThreshold = p.FailureThreshold
+ k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds
+}
diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go
index c478c2b3ae..34c67f363f 100644
--- a/api/v1/cluster_funcs_test.go
+++ b/api/v1/cluster_funcs_test.go
@@ -26,6 +26,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -1682,3 +1683,46 @@ var _ = Describe("UpdateBackupTimes", func() {
To(Equal(now))
})
})
+
+var _ = Describe("Probes configuration", func() {
+ originalProbe := corev1.Probe{
+ ProbeHandler: corev1.ProbeHandler{
+ HTTPGet: &corev1.HTTPGetAction{
+ Path: "/",
+ Port: intstr.FromInt32(23),
+ },
+ },
+
+ InitialDelaySeconds: 21,
+ PeriodSeconds: 11,
+ FailureThreshold: 433,
+ TerminationGracePeriodSeconds: ptr.To[int64](23),
+ }
+
+ It("Does not change any field if the configuration is nil", func() {
+ var nilProbe *Probe
+ configuredProbe := originalProbe.DeepCopy()
+ nilProbe.ApplyInto(configuredProbe)
+ Expect(originalProbe).To(BeEquivalentTo(*configuredProbe))
+ })
+
+ It("Changes the corresponding fields", func() {
+ config := &Probe{
+ InitialDelaySeconds: 1,
+ TimeoutSeconds: 2,
+ PeriodSeconds: 3,
+ SuccessThreshold: 4,
+ FailureThreshold: 5,
+ TerminationGracePeriodSeconds: nil,
+ }
+
+ configuredProbe := originalProbe.DeepCopy()
+ config.ApplyInto(configuredProbe)
+ Expect(configuredProbe.InitialDelaySeconds).To(Equal(config.InitialDelaySeconds))
+ Expect(configuredProbe.TimeoutSeconds).To(Equal(config.TimeoutSeconds))
+ Expect(configuredProbe.PeriodSeconds).To(Equal(config.PeriodSeconds))
+ Expect(configuredProbe.SuccessThreshold).To(Equal(config.SuccessThreshold))
+ Expect(configuredProbe.FailureThreshold).To(Equal(config.FailureThreshold))
+ Expect(configuredProbe.TerminationGracePeriodSeconds).To(BeNil())
+ })
+})
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index affbfe0e60..2814f18cb6 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -476,6 +476,62 @@ type ClusterSpec struct {
// any plugin to be loaded with the corresponding configuration
// +optional
Plugins PluginConfigurationList `json:"plugins,omitempty"`
+
+ // The configuration of the probes to be injected
+ // in the PostgreSQL Pods.
+ // +optional
+ Probes *ProbesConfiguration `json:"probes,omitempty"`
+}
+
+// ProbesConfiguration represent the configuration for the probes
+// to be injected in the PostgreSQL Pods
+type ProbesConfiguration struct {
+ // The startup probe configuration
+ Startup *Probe `json:"startup,omitempty"`
+
+ // The liveness probe configuration
+ Liveness *Probe `json:"liveness,omitempty"`
+
+ // The readiness probe configuration
+ Readiness *Probe `json:"readiness,omitempty"`
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+ // Number of seconds after the container has started before liveness probes are initiated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"`
+ // Number of seconds after which the probe times out.
+ // Defaults to 1 second. Minimum value is 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"`
+ // How often (in seconds) to perform the probe.
+ // Default to 10 seconds. Minimum value is 1.
+ // +optional
+ PeriodSeconds int32 `json:"periodSeconds,omitempty"`
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ // +optional
+ SuccessThreshold int32 `json:"successThreshold,omitempty"`
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // Defaults to 3. Minimum value is 1.
+ // +optional
+ FailureThreshold int32 `json:"failureThreshold,omitempty"`
+ // Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ // The grace period is the duration in seconds after the processes running in the pod are sent
+ // a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ // value overrides the value provided by the pod spec.
+ // Value must be non-negative integer. The value zero indicates stop immediately via
+ // the kill signal (no opportunity to shut down).
+ // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ // +optional
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}
// PluginConfigurationList represent a set of plugin with their
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 014362a084..fbaec944e2 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -843,6 +843,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.Probes != nil {
+ in, out := &in.Probes, &out.Probes
+ *out = new(ProbesConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -2216,6 +2221,56 @@ func (in *PostgresConfiguration) DeepCopy() *PostgresConfiguration {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Probe) DeepCopyInto(out *Probe) {
+ *out = *in
+ if in.TerminationGracePeriodSeconds != nil {
+ in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+ *out = new(int64)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
+func (in *Probe) DeepCopy() *Probe {
+ if in == nil {
+ return nil
+ }
+ out := new(Probe)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProbesConfiguration) DeepCopyInto(out *ProbesConfiguration) {
+ *out = *in
+ if in.Startup != nil {
+ in, out := &in.Startup, &out.Startup
+ *out = new(Probe)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Liveness != nil {
+ in, out := &in.Liveness, &out.Liveness
+ *out = new(Probe)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Readiness != nil {
+ in, out := &in.Readiness, &out.Readiness
+ *out = new(Probe)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbesConfiguration.
+func (in *ProbesConfiguration) DeepCopy() *ProbesConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ProbesConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Publication) DeepCopyInto(out *Publication) {
*out = *in
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 17242a5ec7..e185082fa7 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -4221,6 +4221,159 @@ spec:
https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
for more information
type: string
+ probes:
+ description: |-
+ The configuration of the probes to be injected
+ in the PostgreSQL Pods.
+ properties:
+ liveness:
+ description: The liveness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ readiness:
+ description: The readiness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ startup:
+ description: The startup probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ type: object
projectedVolumeTemplate:
description: |-
Template to be used to define projected volumes, projected volumes will be mounted
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 4854ff8faf..7a5b1c193f 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -1932,6 +1932,14 @@ development/staging purposes.
any plugin to be loaded with the corresponding configuration
+probes
+ProbesConfiguration
+
+
+ The configuration of the probes to be injected
+in the PostgreSQL Pods.
+
+
@@ -4111,6 +4119,120 @@ the primary server of the cluster as part of rolling updates
+## Probe {#postgresql-cnpg-io-v1-Probe}
+
+
+**Appears in:**
+
+- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration)
+
+
+Probe describes a health check to be performed against a container to determine whether it is
+alive or ready to receive traffic.
+
+
+
+Field Description
+
+initialDelaySeconds
+int32
+
+
+ Number of seconds after the container has started before liveness probes are initiated.
+More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+
+
+timeoutSeconds
+int32
+
+
+ Number of seconds after which the probe times out.
+Defaults to 1 second. Minimum value is 1.
+More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+
+
+periodSeconds
+int32
+
+
+ How often (in seconds) to perform the probe.
+Default to 10 seconds. Minimum value is 1.
+
+
+successThreshold
+int32
+
+
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+
+
+failureThreshold
+int32
+
+
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+Defaults to 3. Minimum value is 1.
+
+
+terminationGracePeriodSeconds
+int64
+
+
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+The grace period is the duration in seconds after the processes running in the pod are sent
+a termination signal and the time when the processes are forcibly halted with a kill signal.
+Set this value longer than the expected cleanup time for your process.
+If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+value overrides the value provided by the pod spec.
+Value must be non-negative integer. The value zero indicates stop immediately via
+the kill signal (no opportunity to shut down).
+This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+
+
+
+
+
+## ProbesConfiguration {#postgresql-cnpg-io-v1-ProbesConfiguration}
+
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec)
+
+
+ProbesConfiguration represents the configuration for the probes
+to be injected in the PostgreSQL Pods
+
+
+
+Field Description
+
+startup [Required]
+Probe
+
+
+ The startup probe configuration
+
+
+liveness [Required]
+Probe
+
+
+ The liveness probe configuration
+
+
+readiness [Required]
+Probe
+
+
+ The readiness probe configuration
+
+
+
+
+
## PublicationReclaimPolicy {#postgresql-cnpg-io-v1-PublicationReclaimPolicy}
(Alias of `string`)
diff --git a/docs/src/failure_modes.md b/docs/src/failure_modes.md
index 3f9b746f3c..4dd6df6c9b 100644
--- a/docs/src/failure_modes.md
+++ b/docs/src/failure_modes.md
@@ -99,26 +99,24 @@ kubectl delete pod [primary pod] --grace-period=1
triggers a failover promoting the most aligned standby, without
the guarantee that the primary had been shut down.
+### Liveness Probe Failure
-### Readiness probe failure
+By default, after three consecutive liveness probe failures, the `postgres`
+container will be considered failed. The Pod will remain part of the `Cluster`,
+but the *kubelet* will attempt to restart the failed container. If the issue
+causing the failure persists and cannot be resolved, you can manually delete
+the Pod.
-After 3 failures, the pod will be considered *not ready*. The pod will still
-be part of the `Cluster`, no new pod will be created.
+In either case, self-healing occurs automatically once the underlying issue is
+resolved.
-If the cause of the failure can't be fixed, it is possible to delete the pod
-manually. Otherwise, the pod will resume the previous role when the failure
-is solved.
+### Readiness Probe Failure
-Self-healing will happen after three failures of the probe.
-
-### Liveness probe failure
-
-After 3 failures, the `postgres` container will be considered failed. The
-pod will still be part of the `Cluster`, and the *kubelet* will try to restart
-the container. If the cause of the failure can't be fixed, it is possible
-to delete the pod manually.
-
-Self-healing will happen after three failures of the probe.
+By default, after three consecutive readiness probe failures, the Pod will be
+marked as *not ready*. It will remain part of the `Cluster`, and no new Pod
+will be created. If the issue causing the failure cannot be resolved, you can
+manually delete the Pod. Once the failure is addressed, the Pod will
+automatically regain its previous role.
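+
+If your workloads tolerate longer outages, the window can be widened through
+the `.spec.probes.readiness` stanza of the `Cluster` resource. The following
+is a minimal sketch (the threshold value is illustrative):
+
+```yaml
+# ... snip
+spec:
+  probes:
+    readiness:
+      failureThreshold: 6
+```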
### Worker node drained
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index c8c924f4fe..ce13adbab2 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -18,35 +18,144 @@ of the Pod, the instance manager acts as a backend to handle the
## Startup, liveness and readiness probes
The startup and liveness probes rely on `pg_isready`, while the readiness
-probe checks if the database is up and able to accept connections using the
-superuser credentials.
+probe checks if the database is up and able to accept connections.
-The readiness probe is positive when the Pod is ready to accept traffic.
-The liveness probe controls when to restart the container once
-the startup probe interval has elapsed.
+### Startup Probe
-!!! Important
- The liveness and readiness probes will report a failure if the probe command
- fails three times with a 10-second interval between each check.
+The `.spec.startDelay` parameter specifies the delay (in seconds) before the
+liveness probe activates after a PostgreSQL Pod starts. By default, this is set
+to `3600` seconds. You should adjust this value based on the time PostgreSQL
+requires to fully initialize in your environment.
+
+!!! Warning
+ Setting `.spec.startDelay` too low can cause the liveness probe to activate
+ prematurely, potentially resulting in unnecessary Pod restarts if PostgreSQL
+ hasn’t fully initialized.
+
+CloudNativePG configures the startup probe with the following default parameters:
+
+```yaml
+failureThreshold: FAILURE_THRESHOLD
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 5
+```
+
+Here, `FAILURE_THRESHOLD` is calculated as `startDelay` divided by
+`periodSeconds`.
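+
+For example, with the default `startDelay` of `3600` seconds and a
+`periodSeconds` of `10`, the resulting startup probe is equivalent to:
+
+```yaml
+failureThreshold: 360
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 5
+```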
+
+If the default behavior based on `startDelay` is not suitable for your use
+case, you can take full control of the startup probe by specifying custom
+parameters in the `.spec.probes.startup` stanza. Note that defining this stanza
+will override the default behavior, including the use of `startDelay`.
+
+!!! Warning
+ Ensure that any custom probe settings are aligned with your cluster’s
+ operational requirements to prevent unintended disruptions.
+
+!!! Info
+ For detailed information about probe configuration, refer to the
+ [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
-The liveness probe detects if the PostgreSQL instance is in a
-broken state and needs to be restarted. The value in `startDelay` is used
-to delay the probe's execution, preventing an
-instance with a long startup time from being restarted.
+For example, the following configuration bypasses `startDelay` entirely:
-The amount of time needed for a Pod to be classified as not alive is
-configurable in the `.spec.livenessProbeTimeout` parameter, that
-defaults to 30 seconds.
+```yaml
+# ... snip
+spec:
+ probes:
+ startup:
+ periodSeconds: 3
+ timeoutSeconds: 3
+ failureThreshold: 10
+```
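+
+With these illustrative values, PostgreSQL is allowed roughly
+`failureThreshold × periodSeconds` = 30 seconds to complete its startup
+before the container is restarted, regardless of `startDelay`.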
-The interval (in seconds) after the Pod has started before the liveness
-probe starts working is expressed in the `.spec.startDelay` parameter,
-which defaults to 3600 seconds. The correct value for your cluster is
-related to the time needed by PostgreSQL to start.
+### Liveness Probe
+
+The liveness probe begins after the startup probe succeeds and is responsible
+for detecting whether the PostgreSQL instance has entered a broken state that
+requires the container to be restarted.
+
+The amount of time before a Pod is classified as not alive is configurable via
+the `.spec.livenessProbeTimeout` parameter.
+
+CloudNativePG configures the liveness probe with the following default
+parameters:
+
+```yaml
+failureThreshold: FAILURE_THRESHOLD
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 5
+```
+
+Here, `FAILURE_THRESHOLD` is calculated as `livenessProbeTimeout` divided by
+`periodSeconds`.
+
+By default, `.spec.livenessProbeTimeout` is set to `30` seconds. This means the
+liveness probe will report a failure if it detects three consecutive probe
+failures, with a 10-second interval between each check.
+
+If the default behavior using `livenessProbeTimeout` does not meet your needs,
+you can fully customize the liveness probe by defining parameters in the
+`.spec.probes.liveness` stanza. Keep in mind that specifying this stanza will
+override the default behavior, including the use of `livenessProbeTimeout`.
!!! Warning
- If `.spec.startDelay` is too low, the liveness probe will start working
- before the PostgreSQL startup is complete, and the Pod could be restarted
- prematurely.
+ Ensure that any custom probe settings are aligned with your cluster’s
+ operational requirements to prevent unintended disruptions.
+
+!!! Info
+ For more details on probe configuration, refer to the
+ [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
+
+For example, the following configuration overrides the default behavior and
+bypasses `livenessProbeTimeout`:
+
+```yaml
+# ... snip
+spec:
+ probes:
+ liveness:
+ periodSeconds: 3
+ timeoutSeconds: 3
+ failureThreshold: 10
+```
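+
+Note that these illustrative values keep the same overall 30-second window as
+the default (`10 × 3` seconds instead of `3 × 10`), but the probe runs more
+frequently and each check times out sooner.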
+
+### Readiness Probe
+
+The readiness probe determines when a pod running a PostgreSQL instance is
+prepared to accept traffic and serve requests.
+
+CloudNativePG uses the following default configuration for the readiness probe:
+
+```yaml
+failureThreshold: 3
+periodSeconds: 10
+successThreshold: 1
+timeoutSeconds: 5
+```
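+
+With these defaults, an instance stops receiving traffic after roughly 30
+seconds (`failureThreshold × periodSeconds`) of consecutive failed checks.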
+
+If the default settings do not suit your requirements, you can fully customize
+the readiness probe by specifying parameters in the `.spec.probes.readiness`
+stanza. For example:
+
+```yaml
+# ... snip
+spec:
+ probes:
+ readiness:
+ periodSeconds: 3
+ timeoutSeconds: 3
+ failureThreshold: 10
+```
+
+!!! Warning
+ Ensure that any custom probe settings are aligned with your cluster’s
+ operational requirements to prevent unintended disruptions.
+
+!!! Info
+ For more information on configuring probes, see the
+ [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
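+
+Putting it all together, the following is a minimal sketch of a `Cluster`
+resource that overrides all three probes (the metadata, instance count, and
+storage values are illustrative):
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  probes:
+    startup:
+      periodSeconds: 3
+      timeoutSeconds: 3
+      failureThreshold: 10
+    liveness:
+      periodSeconds: 3
+      timeoutSeconds: 3
+      failureThreshold: 10
+    readiness:
+      periodSeconds: 3
+      timeoutSeconds: 3
+      failureThreshold: 10
+```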
## Shutdown control
diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md
index 5008d33b8c..c961a27154 100644
--- a/docs/src/operator_capability_levels.md
+++ b/docs/src/operator_capability_levels.md
@@ -494,18 +494,24 @@ scalability of PostgreSQL databases, ensuring a streamlined and optimized
experience for managing large scale data storage in cloud-native environments.
Support for temporary tablespaces is also included.
-### Liveness and readiness probes
-
-The operator defines liveness and readiness probes for the Postgres
-containers that are then invoked by the kubelet. They're mapped respectively
-to the `/healthz` and `/readyz` endpoints of the web server managed
-directly by the instance manager.
-
-The liveness probe is based on the `pg_isready` executable, and the pod is
-considered healthy with exit codes 0 (server accepting connections normally)
-and 1 (server is rejecting connections, for example, during startup). The
-readiness probe issues a simple query (`;`) to verify that the server is
-ready to accept connections.
+### Startup, Liveness, and Readiness Probes
+
+CloudNativePG configures startup, liveness, and readiness probes for PostgreSQL
+containers, which are managed by the Kubernetes kubelet. These probes interact
+with the `/healthz` and `/readyz` endpoints exposed by the instance manager's
+web server to monitor the Pod's health and readiness.
+
+The startup and liveness probes use the `pg_isready` utility. A Pod is
+considered healthy if `pg_isready` returns an exit code of 0 (indicating the
+server is accepting connections) or 1 (indicating the server is rejecting
+connections, such as during startup).
+
+The readiness probe executes a simple SQL query (`;`) to verify that the
+PostgreSQL server is ready to accept client connections.
+
+All probes are configured with default settings but can be fully customized to
+meet specific needs, allowing for fine-tuning to align with your environment
+and workloads.
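+
+The default values and the `.spec.probes` stanza used to override them are
+described in the [instance manager](instance_manager.md) documentation.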
### Rolling deployments
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index 8e3e5ed44e..3fe970313b 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -198,6 +198,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
Env: envConfig.EnvVars,
EnvFrom: envConfig.EnvFrom,
VolumeMounts: createPostgresVolumeMounts(cluster),
+ // This is the default startup probe, and can be overridden
+		// by the user configuration in cluster.spec.probes.startup
StartupProbe: &corev1.Probe{
FailureThreshold: getStartupProbeFailureThreshold(cluster.GetMaxStartDelay()),
PeriodSeconds: StartupProbePeriod,
@@ -209,6 +211,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
},
},
},
+ // This is the default readiness probe, and can be overridden
+ // by the user configuration in cluster.spec.probes.readiness
ReadinessProbe: &corev1.Probe{
TimeoutSeconds: 5,
PeriodSeconds: ReadinessProbePeriod,
@@ -219,6 +223,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
},
},
},
+ // This is the default liveness probe, and can be overridden
+ // by the user configuration in cluster.spec.probes.liveness
LivenessProbe: &corev1.Probe{
PeriodSeconds: LivenessProbePeriod,
TimeoutSeconds: 5,
@@ -272,10 +278,14 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
// if user customizes the liveness probe timeout, we need to adjust the failure threshold
addLivenessProbeFailureThreshold(cluster, &containers[0])
+ // use the custom probe configuration if provided
+ ensureCustomProbesConfiguration(&cluster, &containers[0])
+
return containers
}
-// adjust the liveness probe failure threshold based on the `spec.livenessProbeTimeout` value
+// addLivenessProbeFailureThreshold adjusts the liveness probe failure threshold
+// based on the `spec.livenessProbeTimeout` value
func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.Container) {
if cluster.Spec.LivenessProbeTimeout != nil {
timeout := *cluster.Spec.LivenessProbeTimeout
@@ -283,6 +293,21 @@ func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.C
}
}
+// ensureCustomProbesConfiguration applies the custom probe configuration
+// if specified inside the cluster specification
+func ensureCustomProbesConfiguration(cluster *apiv1.Cluster, container *corev1.Container) {
+ // No probes configuration
+ if cluster.Spec.Probes == nil {
+ return
+ }
+
+ // There's no need to check for nils here because a nil probe specification
+ // will result in no change in the Kubernetes probe.
+ cluster.Spec.Probes.Liveness.ApplyInto(container.LivenessProbe)
+ cluster.Spec.Probes.Readiness.ApplyInto(container.ReadinessProbe)
+ cluster.Spec.Probes.Startup.ApplyInto(container.StartupProbe)
+}
+
// getStartupProbeFailureThreshold get the startup probe failure threshold
// FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1
func getStartupProbeFailureThreshold(startupDelay int32) int32 {
diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go
index 54c9328d8a..ecd02fbfe0 100644
--- a/pkg/specs/podspec_diff.go
+++ b/pkg/specs/podspec_diff.go
@@ -176,6 +176,9 @@ func doContainersMatch(currentContainer, targetContainer corev1.Container) (bool
"liveness-probe": func() bool {
return reflect.DeepEqual(currentContainer.LivenessProbe, targetContainer.LivenessProbe)
},
+ "startup-probe": func() bool {
+ return reflect.DeepEqual(currentContainer.StartupProbe, targetContainer.StartupProbe)
+ },
"command": func() bool {
return reflect.DeepEqual(currentContainer.Command, targetContainer.Command)
},
diff --git a/pkg/specs/podspec_diff_test.go b/pkg/specs/podspec_diff_test.go
index 5869d68221..cfbd40e907 100644
--- a/pkg/specs/podspec_diff_test.go
+++ b/pkg/specs/podspec_diff_test.go
@@ -17,6 +17,8 @@ limitations under the License.
package specs
import (
+ corev1 "k8s.io/api/core/v1"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -37,4 +39,55 @@ var _ = Describe("PodSpecDiff", func() {
It("returns false for empty volume name", func() {
Expect(shouldIgnoreCurrentVolume("")).To(BeFalse())
})
+
+	It("returns false when the startup probes do not match and true otherwise", func() {
+ containerPre := corev1.Container{
+ StartupProbe: &corev1.Probe{
+ TimeoutSeconds: 23,
+ },
+ }
+ containerPost := corev1.Container{
+ StartupProbe: &corev1.Probe{
+ TimeoutSeconds: 24,
+ },
+ }
+ Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue())
+ status, diff := doContainersMatch(containerPre, containerPost)
+ Expect(status).To(BeFalse())
+ Expect(diff).To(Equal("startup-probe"))
+ })
+
+	It("returns false when the liveness probes do not match and true otherwise", func() {
+ containerPre := corev1.Container{
+ LivenessProbe: &corev1.Probe{
+ InitialDelaySeconds: 23,
+ },
+ }
+ containerPost := corev1.Container{
+ LivenessProbe: &corev1.Probe{
+ InitialDelaySeconds: 24,
+ },
+ }
+ Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue())
+ status, diff := doContainersMatch(containerPre, containerPost)
+ Expect(status).To(BeFalse())
+ Expect(diff).To(Equal("liveness-probe"))
+ })
+
+	It("returns false when the readiness probes do not match and true otherwise", func() {
+ containerPre := corev1.Container{
+ ReadinessProbe: &corev1.Probe{
+ SuccessThreshold: 23,
+ },
+ }
+ containerPost := corev1.Container{
+ ReadinessProbe: &corev1.Probe{
+ SuccessThreshold: 24,
+ },
+ }
+ Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue())
+ status, diff := doContainersMatch(containerPre, containerPost)
+ Expect(status).To(BeFalse())
+ Expect(diff).To(Equal("readiness-probe"))
+ })
})
diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go
new file mode 100644
index 0000000000..9e7dae8567
--- /dev/null
+++ b/tests/e2e/probes_test.go
@@ -0,0 +1,190 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+// Set of tests in which we check that the configuration of the probes is applied
+var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() {
+ const (
+ level = tests.High
+ )
+
+ BeforeEach(func() {
+ if testLevelEnv.Depth < int(level) {
+ Skip("Test depth is lower than the amount requested for this test")
+ }
+ })
+
+ It("can change the probes configuration", func(ctx SpecContext) {
+ var namespace string
+
+ const sampleFile = fixturesDir + "/base/cluster-storage-class.yaml.template"
+ const clusterName = "postgresql-storage-class"
+
+		// IMPORTANT: for this E2E test to work, these values need to be different
+ // than the default Kubernetes settings
+ probeConfiguration := apiv1.Probe{
+ InitialDelaySeconds: 2,
+ PeriodSeconds: 4,
+ TimeoutSeconds: 8,
+ }
+ probesConfiguration := apiv1.ProbesConfiguration{
+ Startup: probeConfiguration.DeepCopy(),
+ Liveness: probeConfiguration.DeepCopy(),
+ Readiness: probeConfiguration.DeepCopy(),
+ }
+
+ assertProbeCoherentWithConfiguration := func(probe *corev1.Probe) {
+ Expect(probe.InitialDelaySeconds).To(BeEquivalentTo(probeConfiguration.InitialDelaySeconds))
+ Expect(probe.PeriodSeconds).To(BeEquivalentTo(probeConfiguration.PeriodSeconds))
+ Expect(probe.TimeoutSeconds).To(BeEquivalentTo(probeConfiguration.TimeoutSeconds))
+ }
+
+ assertProbesCoherentWithConfiguration := func(container *corev1.Container) {
+ assertProbeCoherentWithConfiguration(container.LivenessProbe)
+ assertProbeCoherentWithConfiguration(container.ReadinessProbe)
+		assertProbeCoherentWithConfiguration(container.StartupProbe)
+ }
+
+ var defaultReadinessProbe *corev1.Probe
+ var defaultLivenessProbe *corev1.Probe
+ var defaultStartupProbe *corev1.Probe
+
+ By("creating an empty cluster", func() {
+ // Create a cluster in a namespace we'll delete after the test
+ const namespacePrefix = "probes"
+ var err error
+ namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ Expect(err).ToNot(HaveOccurred())
+
+ AssertCreateCluster(namespace, clusterName, sampleFile, env)
+ })
+
+ By("getting the default probes configuration", func() {
+ var pod corev1.Pod
+ err := env.Client.Get(ctx, client.ObjectKey{
+ Name: fmt.Sprintf("%s-1", clusterName),
+ Namespace: namespace,
+ }, &pod)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(pod.Spec.Containers[0].Name).To(Equal("postgres"))
+ defaultReadinessProbe = pod.Spec.Containers[0].ReadinessProbe.DeepCopy()
+ defaultLivenessProbe = pod.Spec.Containers[0].LivenessProbe.DeepCopy()
+ defaultStartupProbe = pod.Spec.Containers[0].StartupProbe.DeepCopy()
+ })
+
+ By("applying a probe configuration", func() {
+ var cluster apiv1.Cluster
+ err := env.Client.Get(ctx, client.ObjectKey{
+ Name: clusterName,
+ Namespace: namespace,
+ }, &cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ originalCluster := cluster.DeepCopy()
+ cluster.Spec.Probes = probesConfiguration.DeepCopy()
+
+ err = env.Client.Patch(ctx, &cluster, client.MergeFrom(originalCluster))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ By("waiting for the cluster to restart", func() {
+ AssertClusterEventuallyReachesPhase(namespace, clusterName,
+ []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ })
+
+ By("checking the applied settings", func() {
+ var cluster apiv1.Cluster
+ err := env.Client.Get(ctx, client.ObjectKey{
+ Name: clusterName,
+ Namespace: namespace,
+ }, &cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ for _, instance := range cluster.Status.InstanceNames {
+ var pod corev1.Pod
+ err := env.Client.Get(ctx, client.ObjectKey{
+ Name: instance,
+ Namespace: namespace,
+ }, &pod)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(pod.Spec.Containers[0].Name).To(Equal("postgres"))
+ assertProbesCoherentWithConfiguration(&pod.Spec.Containers[0])
+ }
+ })
+
+		By("reverting the changes", func() {
+ var cluster apiv1.Cluster
+ err := env.Client.Get(ctx, client.ObjectKey{
+ Name: clusterName,
+ Namespace: namespace,
+ }, &cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ originalCluster := cluster.DeepCopy()
+ cluster.Spec.Probes = &apiv1.ProbesConfiguration{}
+
+ err = env.Client.Patch(ctx, &cluster, client.MergeFrom(originalCluster))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ By("waiting for the cluster to restart", func() {
+ AssertClusterEventuallyReachesPhase(namespace, clusterName,
+ []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ })
+
+ By("checking the applied settings", func() {
+ var cluster apiv1.Cluster
+ err := env.Client.Get(ctx, client.ObjectKey{
+ Name: clusterName,
+ Namespace: namespace,
+ }, &cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ for _, instance := range cluster.Status.InstanceNames {
+ var pod corev1.Pod
+ err = env.Client.Get(ctx, client.ObjectKey{
+ Name: instance,
+ Namespace: namespace,
+ }, &pod)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(pod.Spec.Containers[0].Name).To(Equal("postgres"))
+ Expect(pod.Spec.Containers[0].LivenessProbe).To(BeEquivalentTo(defaultLivenessProbe))
+ Expect(pod.Spec.Containers[0].ReadinessProbe).To(BeEquivalentTo(defaultReadinessProbe))
+ Expect(pod.Spec.Containers[0].StartupProbe).To(BeEquivalentTo(defaultStartupProbe))
+ }
+ })
+ })
+})
From e580344a5e5957e856ee3fb8d92af28b29518943 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niccol=C3=B2=20Fei?=
Date: Thu, 5 Dec 2024 10:03:25 +0100
Subject: [PATCH 198/836] refactor: use existing error handling functions in
the `DatabaseReconciler` (#6212)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This patch updates the DatabaseReconciler code to match the structure
of the PublicationReconciler and SubscriptionReconciler. It utilizes the common
functions introduced in the Publications and Subscriptions implementation.
Additionally, the actions within the DatabaseReconciler have been reorganized
to align with the operational flow of the PublicationReconciler and
SubscriptionReconciler.
Closes #5927
Signed-off-by: Niccolò Fei
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Co-authored-by: Jaime Silvela
Co-authored-by: Armando Ruocco
---
api/v1/database_funcs.go | 24 +-
internal/management/controller/common.go | 12 +
...tion_controller_test.go => common_test.go} | 0
.../controller/database_controller.go | 216 ++++++------------
.../controller/database_controller_test.go | 35 +--
.../controller/publication_controller.go | 11 +-
.../controller/subscription_controller.go | 22 +-
7 files changed, 136 insertions(+), 184 deletions(-)
rename internal/management/controller/{subscription_controller_test.go => common_test.go} (100%)
diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
index 879d97490c..7679f3236d 100644
--- a/api/v1/database_funcs.go
+++ b/api/v1/database_funcs.go
@@ -16,7 +16,29 @@ limitations under the License.
package v1
-import corev1 "k8s.io/api/core/v1"
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/utils/ptr"
+)
+
+// SetAsFailed sets the database as failed with the given error
+func (db *Database) SetAsFailed(err error) {
+ db.Status.Applied = ptr.To(false)
+ db.Status.Message = err.Error()
+}
+
+// SetAsUnknown sets the database as unknown with the given error
+func (db *Database) SetAsUnknown(err error) {
+ db.Status.Applied = nil
+ db.Status.Message = err.Error()
+}
+
+// SetAsReady sets the database as working correctly
+func (db *Database) SetAsReady() {
+ db.Status.Applied = ptr.To(true)
+ db.Status.Message = ""
+ db.Status.ObservedGeneration = db.Generation
+}
// GetClusterRef returns the cluster reference of the database
func (db *Database) GetClusterRef() corev1.LocalObjectReference {
diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go
index c0d87aeb97..b5013d6657 100644
--- a/internal/management/controller/common.go
+++ b/internal/management/controller/common.go
@@ -19,6 +19,7 @@ package controller
import (
"bytes"
"context"
+ "database/sql"
"fmt"
"maps"
"slices"
@@ -31,6 +32,17 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)
+// errClusterIsReplica is raised when an object
+// cannot be reconciled because it belongs to a replica cluster
+var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary")
+
+type instanceInterface interface {
+ GetSuperUserDB() (*sql.DB, error)
+ GetClusterName() string
+ GetPodName() string
+ GetNamespaceName() string
+}
+
type markableAsFailed interface {
client.Object
SetAsFailed(err error)
diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/common_test.go
similarity index 100%
rename from internal/management/controller/subscription_controller_test.go
rename to internal/management/controller/common_test.go
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index f1283f2d9c..22a72861fd 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -18,23 +18,17 @@ package controller
import (
"context"
- "database/sql"
- "errors"
"fmt"
"time"
"github.com/cloudnative-pg/machinery/pkg/log"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -43,20 +37,10 @@ type DatabaseReconciler struct {
client.Client
Scheme *runtime.Scheme
- instance instanceInterface
+ instance instanceInterface
+ finalizerReconciler *finalizerReconciler[*apiv1.Database]
}
-type instanceInterface interface {
- GetSuperUserDB() (*sql.DB, error)
- GetClusterName() string
- GetPodName() string
- GetNamespaceName() string
-}
-
-// errClusterIsReplica is raised when the database object
-// cannot be reconciled because it belongs to a replica cluster
-var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary")
-
// databaseReconciliationInterval is the time between the
// database reconciliation loop failures
const databaseReconciliationInterval = 30 * time.Second
@@ -76,16 +60,16 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
Namespace: req.Namespace,
Name: req.Name,
}, &database); err != nil {
- // This is a deleted object, there's nothing
- // to do since we don't manage any finalizers.
- if apierrors.IsNotFound(err) {
- return ctrl.Result{}, nil
- }
- return ctrl.Result{}, err
+ contextLogger.Trace("Could not fetch Database", "error", err)
+ return ctrl.Result{}, client.IgnoreNotFound(err)
}
// This is not for me!
if database.Spec.ClusterRef.Name != r.instance.GetClusterName() {
+ contextLogger.Trace("Database is not for this cluster",
+ "cluster", database.Spec.ClusterRef.Name,
+ "expected", r.instance.GetClusterName(),
+ )
return ctrl.Result{}, nil
}
@@ -97,19 +81,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
// Fetch the Cluster from the cache
cluster, err := r.GetCluster(ctx)
if err != nil {
- if apierrors.IsNotFound(err) {
- // The cluster has been deleted.
- // We just need to wait for this instance manager to be terminated
- contextLogger.Debug("Could not find Cluster")
- return ctrl.Result{}, nil
- }
-
- return ctrl.Result{}, fmt.Errorf("could not fetch Cluster: %w", err)
- }
-
- // This is not for me, at least now
- if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
- return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
+ return ctrl.Result{}, markAsFailed(ctx, r.Client, &database, fmt.Errorf("while fetching the cluster: %w", err))
}
contextLogger.Info("Reconciling database")
@@ -122,64 +94,59 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
- // Cannot do anything on a replica cluster
- if cluster.IsReplica() {
- return r.replicaClusterReconciliation(ctx, &database)
+ // This is not for me, at least now
+ if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
+ return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
- // Add the finalizer if we don't have it
- // nolint:nestif
- if database.DeletionTimestamp.IsZero() {
- if controllerutil.AddFinalizer(&database, utils.DatabaseFinalizerName) {
- if err := r.Update(ctx, &database); err != nil {
- return ctrl.Result{}, err
- }
- }
- } else {
- // This database is being deleted
- if controllerutil.ContainsFinalizer(&database, utils.DatabaseFinalizerName) {
- if database.Spec.ReclaimPolicy == apiv1.DatabaseReclaimDelete {
- if err := r.deleteDatabase(ctx, &database); err != nil {
- return ctrl.Result{}, err
- }
- }
-
- // remove our finalizer from the list and update it.
- controllerutil.RemoveFinalizer(&database, utils.DatabaseFinalizerName)
- if err := r.Update(ctx, &database); err != nil {
- return ctrl.Result{}, err
- }
+ // Cannot do anything on a replica cluster
+ if cluster.IsReplica() {
+ if err := markAsUnknown(ctx, r.Client, &database, errClusterIsReplica); err != nil {
+ return ctrl.Result{}, err
}
+ return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
+ }
+ if err := r.finalizerReconciler.reconcile(ctx, &database); err != nil {
+ return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err)
+ }
+ if !database.GetDeletionTimestamp().IsZero() {
return ctrl.Result{}, nil
}
// Make sure the target PG Database is not being managed by another Database Object
if err := r.ensureOnlyOneManager(ctx, database); err != nil {
- return r.failedReconciliation(
- ctx,
- &database,
- err,
- )
+ if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the database resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the database resource: %w, original error: %w",
+ markErr,
+ err)
+ }
+ return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
- if err := r.reconcileDatabase(
- ctx,
- &database,
- ); err != nil {
- contextLogger.Error(err, "while reconciling database")
- return r.failedReconciliation(
- ctx,
- &database,
- err,
- )
+ if err := r.reconcileDatabase(ctx, &database); err != nil {
+ if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the database resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the database resource: %w, original error: %w",
+ markErr,
+ err)
+ }
+ return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
- contextLogger.Info("Reconciliation of database completed")
- return r.succeededReconciliation(
- ctx,
- &database,
- )
+ if err := markAsReady(ctx, r.Client, &database); err != nil {
+ return ctrl.Result{}, err
+ }
+ return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
// ensureOnlyOneManager verifies that the target PostgreSQL Database specified by the given Database object
@@ -226,68 +193,16 @@ func (r *DatabaseReconciler) ensureOnlyOneManager(
return nil
}
-// failedReconciliation marks the reconciliation as failed and logs the corresponding error
-func (r *DatabaseReconciler) failedReconciliation(
- ctx context.Context,
- database *apiv1.Database,
- err error,
-) (ctrl.Result, error) {
- oldDatabase := database.DeepCopy()
- database.Status.Message = fmt.Sprintf("reconciliation error: %s", err.Error())
- database.Status.Applied = ptr.To(false)
-
- var statusError *instance.StatusError
- if errors.As(err, &statusError) {
- // The body line of the instance manager contains the human
- // readable error
- database.Status.Message = statusError.Body
- }
-
- if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil {
- return ctrl.Result{}, err
- }
-
- return ctrl.Result{
- RequeueAfter: databaseReconciliationInterval,
- }, nil
-}
-
-// succeededReconciliation marks the reconciliation as succeeded
-func (r *DatabaseReconciler) succeededReconciliation(
- ctx context.Context,
- database *apiv1.Database,
-) (ctrl.Result, error) {
- oldDatabase := database.DeepCopy()
- database.Status.Message = ""
- database.Status.Applied = ptr.To(true)
- database.Status.ObservedGeneration = database.Generation
-
- if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil {
- return ctrl.Result{}, err
+func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1.Database) error {
+ if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete {
+ return nil
}
-
- return ctrl.Result{
- RequeueAfter: databaseReconciliationInterval,
- }, nil
-}
-
-// replicaClusterReconciliation sets the status for a reconciliation that's
-// executed in a replica Cluster
-func (r *DatabaseReconciler) replicaClusterReconciliation(
- ctx context.Context,
- database *apiv1.Database,
-) (ctrl.Result, error) {
- oldDatabase := database.DeepCopy()
- database.Status.Message = errClusterIsReplica.Error()
- database.Status.Applied = nil
-
- if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil {
- return ctrl.Result{}, err
+ sqlDB, err := r.instance.GetSuperUserDB()
+ if err != nil {
+ return fmt.Errorf("while getting DB connection: %w", err)
}
- return ctrl.Result{
- RequeueAfter: databaseReconciliationInterval,
- }, nil
+ return dropDatabase(ctx, sqlDB, db)
}
// NewDatabaseReconciler creates a new database reconciler
@@ -295,10 +210,18 @@ func NewDatabaseReconciler(
mgr manager.Manager,
instance *postgres.Instance,
) *DatabaseReconciler {
- return &DatabaseReconciler{
+ dr := &DatabaseReconciler{
Client: mgr.GetClient(),
instance: instance,
}
+
+ dr.finalizerReconciler = newFinalizerReconciler(
+ mgr.GetClient(),
+ utils.DatabaseFinalizerName,
+ dr.evaluateDropDatabase,
+ )
+
+ return dr
}
// SetupWithManager sets up the controller with the Manager.
@@ -335,12 +258,3 @@ func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.D
return createDatabase(ctx, db, obj)
}
-
-func (r *DatabaseReconciler) deleteDatabase(ctx context.Context, obj *apiv1.Database) error {
- db, err := r.instance.GetSuperUserDB()
- if err != nil {
- return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err)
- }
-
- return dropDatabase(ctx, db, obj)
-}
diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go
index c9c86ac216..37712a2c21 100644
--- a/internal/management/controller/database_controller_test.go
+++ b/internal/management/controller/database_controller_test.go
@@ -34,6 +34,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -109,6 +110,11 @@ var _ = Describe("Managed Database status", func() {
Scheme: schemeBuilder.BuildWithAllKnownScheme(),
instance: &f,
}
+ r.finalizerReconciler = newFinalizerReconciler(
+ fakeClient,
+ utils.DatabaseFinalizerName,
+ r.evaluateDropDatabase,
+ )
})
AfterEach(func() {
@@ -263,7 +269,7 @@ var _ = Describe("Managed Database status", func() {
Expect(apierrors.IsNotFound(err)).To(BeTrue())
})
- It("skips reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
+ It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
// since the fakeClient has the `cluster-example` cluster, let's reference
// another cluster `cluster-other` that is not found by the fakeClient
pgInstance := postgres.NewInstance().
@@ -301,8 +307,8 @@ var _ = Describe("Managed Database status", func() {
}, &updatedDatabase)
Expect(err).ToNot(HaveOccurred())
- Expect(updatedDatabase.Status.Applied).Should(BeNil())
- Expect(updatedDatabase.Status.Message).Should(BeEmpty())
+ Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(updatedDatabase.Status.Message).Should(ContainSubstring(`"cluster-other" not found`))
})
It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) {
@@ -333,29 +339,6 @@ var _ = Describe("Managed Database status", func() {
Expect(result).Should(BeZero()) // nothing to do, since the DB is being deleted
})
- It("properly marks the status on a succeeded reconciliation", func(ctx SpecContext) {
- _, err := r.succeededReconciliation(ctx, database)
- Expect(err).ToNot(HaveOccurred())
- Expect(database.Status.Applied).To(HaveValue(BeTrue()))
- Expect(database.Status.Message).To(BeEmpty())
- })
-
- It("properly marks the status on a failed reconciliation", func(ctx SpecContext) {
- exampleError := fmt.Errorf("sample error for database %s", database.Spec.Name)
-
- _, err := r.failedReconciliation(ctx, database, exampleError)
- Expect(err).ToNot(HaveOccurred())
- Expect(database.Status.Applied).To(HaveValue(BeFalse()))
- Expect(database.Status.Message).To(ContainSubstring(exampleError.Error()))
- })
-
- It("properly marks the status on a replica Cluster reconciliation", func(ctx SpecContext) {
- _, err := r.replicaClusterReconciliation(ctx, database)
- Expect(err).ToNot(HaveOccurred())
- Expect(database.Status.Applied).To(BeNil())
- Expect(database.Status.Message).To(BeEquivalentTo(errClusterIsReplica.Error()))
- })
-
It("drops database with ensure absent option", func(ctx SpecContext) {
// Mocking dropDatabase
expectedValue := sqlmock.NewResult(0, 1)
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
index 9758ff690d..086b37e778 100644
--- a/internal/management/controller/publication_controller.go
+++ b/internal/management/controller/publication_controller.go
@@ -124,8 +124,15 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if err := r.alignPublication(ctx, &publication); err != nil {
contextLogger.Error(err, "while reconciling publication")
- if err := markAsFailed(ctx, r.Client, &publication, err); err != nil {
- return ctrl.Result{}, err
+ if markErr := markAsFailed(ctx, r.Client, &publication, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the publication resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the publication resource: %w, original error: %w",
+ markErr,
+ err)
}
return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
}
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go
index 8019c3dd2b..16a1165b7f 100644
--- a/internal/management/controller/subscription_controller.go
+++ b/internal/management/controller/subscription_controller.go
@@ -122,16 +122,30 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request
subscription.Spec.PublicationDBName,
)
if err != nil {
- if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil {
- return ctrl.Result{}, err
+ if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the subscription resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the subscription resource: %w, original error: %w",
+ markErr,
+ err)
}
return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
}
if err := r.alignSubscription(ctx, &subscription, connString); err != nil {
contextLogger.Error(err, "while reconciling subscription")
- if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil {
- return ctrl.Result{}, err
+ if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil {
+ contextLogger.Error(err, "while marking as failed the subscription resource",
+ "error", err,
+ "markError", markErr,
+ )
+ return ctrl.Result{}, fmt.Errorf(
+ "encountered an error while marking as failed the subscription resource: %w, original error: %w",
+ markErr,
+ err)
}
return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
}
From 7dcb199181b753db8badf7fd12a19219221376b1 Mon Sep 17 00:00:00 2001
From: Tao Li
Date: Thu, 5 Dec 2024 20:19:22 +0800
Subject: [PATCH 199/836] fix: eliminate redundant Cluster status updates with
image catalog (#6277)
This patch resolves the issue of redundant Cluster status updates
triggered when the image catalog is enabled.
Closes: #6276
Signed-off-by: Tao Li
---
internal/controller/cluster_image.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go
index 95ac9ad668..547b610cc9 100644
--- a/internal/controller/cluster_image.go
+++ b/internal/controller/cluster_image.go
@@ -113,7 +113,7 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C
}
// If the image is different, we set it into the cluster status
- if cluster.Spec.ImageName != catalogImage {
+ if cluster.Status.Image != catalogImage {
cluster.Status.Image = catalogImage
patch := client.MergeFrom(oldCluster)
if err := r.Status().Patch(ctx, cluster, patch); err != nil {
From d8af714ebade46e4478957e429c4a07a473e8f55 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Thu, 5 Dec 2024 14:14:21 +0100
Subject: [PATCH 200/836] docs: cosmetic changes to `Database` spec (#6280)
Closes #6278
Signed-off-by: Gabriele Bartolini
Signed-off-by: Marco Nenciarini
Signed-off-by: Jaime Silvela
Co-authored-by: Marco Nenciarini
Co-authored-by: Jaime Silvela
---
api/v1/database_types.go | 85 ++++++++++-------
.../bases/postgresql.cnpg.io_databases.yaml | 92 ++++++++++++-------
docs/src/cloudnative-pg.v1.md | 84 ++++++++++-------
3 files changed, 162 insertions(+), 99 deletions(-)
diff --git a/api/v1/database_types.go b/api/v1/database_types.go
index 5e6ecd834a..3c759ab767 100644
--- a/api/v1/database_types.go
+++ b/api/v1/database_types.go
@@ -35,110 +35,129 @@ const (
DatabaseReclaimRetain DatabaseReclaimPolicy = "retain"
)
-// DatabaseSpec is the specification of a Postgresql Database
+// DatabaseSpec is the specification of a PostgreSQL Database, built around the
+// `CREATE DATABASE`, `ALTER DATABASE`, and `DROP DATABASE` SQL commands of
+// PostgreSQL.
// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`"
// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`"
// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`"
type DatabaseSpec struct {
- // The corresponding cluster
+ // The name of the PostgreSQL cluster hosting the database.
ClusterRef corev1.LocalObjectReference `json:"cluster"`
- // Ensure the PostgreSQL database is `present` or `absent` - defaults to "present"
+ // Ensure the PostgreSQL database is `present` or `absent` - defaults to "present".
// +kubebuilder:default:="present"
// +kubebuilder:validation:Enum=present;absent
// +optional
Ensure EnsureOption `json:"ensure,omitempty"`
- // The name inside PostgreSQL
+ // The name of the database to create inside PostgreSQL. This setting cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
// +kubebuilder:validation:XValidation:rule="self != 'postgres'",message="the name postgres is reserved"
// +kubebuilder:validation:XValidation:rule="self != 'template0'",message="the name template0 is reserved"
// +kubebuilder:validation:XValidation:rule="self != 'template1'",message="the name template1 is reserved"
Name string `json:"name"`
- // The owner
+ // Maps to the `OWNER` parameter of `CREATE DATABASE`.
+ // Maps to the `OWNER TO` command of `ALTER DATABASE`.
+ // The role name of the user who owns the database inside PostgreSQL.
Owner string `json:"owner"`
- // The name of the template from which to create the new database
+ // Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting
+ // cannot be changed. The name of the template from which to create
+ // this database.
// +optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="template is immutable"
Template string `json:"template,omitempty"`
- // The encoding (cannot be changed)
+ // Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting
+ // cannot be changed. Character set encoding to use in the database.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="encoding is immutable"
// +optional
Encoding string `json:"encoding,omitempty"`
- // The locale (cannot be changed)
- // Sets the default collation order and character classification in the new database.
+ // Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting
+ // cannot be changed. Sets the default collation order and character
+ // classification in the new database.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable"
// +optional
Locale string `json:"locale,omitempty"`
- // The LOCALE_PROVIDER (cannot be changed)
- // This option sets the locale provider for databases created in the new cluster.
- // Available from PostgreSQL 16.
+ // Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This
+ // setting cannot be changed. This option sets the locale provider for
+ // databases created in the new cluster. Available from PostgreSQL 16.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeProvider is immutable"
// +optional
LocaleProvider string `json:"localeProvider,omitempty"`
- // The LC_COLLATE (cannot be changed)
+ // Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This
+ // setting cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCollate is immutable"
// +optional
LcCollate string `json:"localeCollate,omitempty"`
- // The LC_CTYPE (cannot be changed)
+ // Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting
+ // cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCType is immutable"
// +optional
LcCtype string `json:"localeCType,omitempty"`
- // The ICU_LOCALE (cannot be changed)
- // Specifies the ICU locale when the ICU provider is used.
- // This option requires `localeProvider` to be set to `icu`.
- // Available from PostgreSQL 15.
+ // Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This
+ // setting cannot be changed. Specifies the ICU locale when the ICU
+ // provider is used. This option requires `localeProvider` to be set to
+ // `icu`. Available from PostgreSQL 15.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuLocale is immutable"
// +optional
IcuLocale string `json:"icuLocale,omitempty"`
- // The ICU_RULES (cannot be changed)
- // Specifies additional collation rules to customize the behavior of the default collation.
- // This option requires `localeProvider` to be set to `icu`.
- // Available from PostgreSQL 16.
+ // Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting
+ // cannot be changed. Specifies additional collation rules to customize
+ // the behavior of the default collation. This option requires
+ // `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuRules is immutable"
// +optional
IcuRules string `json:"icuRules,omitempty"`
- // The BUILTIN_LOCALE (cannot be changed)
- // Specifies the locale name when the builtin provider is used.
- // This option requires `localeProvider` to be set to `builtin`.
- // Available from PostgreSQL 17.
+ // Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This
+ // setting cannot be changed. Specifies the locale name when the
+ // builtin provider is used. This option requires `localeProvider` to
+ // be set to `builtin`. Available from PostgreSQL 17.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtinLocale is immutable"
// +optional
BuiltinLocale string `json:"builtinLocale,omitempty"`
- // The COLLATION_VERSION (cannot be changed)
+ // Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This
+ // setting cannot be changed.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collationVersion is immutable"
// +optional
CollationVersion string `json:"collationVersion,omitempty"`
- // True when the database is a template
+ // Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER
+ // DATABASE`. If true, this database is considered a template and can
+ // be cloned by any user with `CREATEDB` privileges.
// +optional
IsTemplate *bool `json:"isTemplate,omitempty"`
- // True when connections to this database are allowed
+ // Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and
+ // `ALTER DATABASE`. If false then no one can connect to this database.
// +optional
AllowConnections *bool `json:"allowConnections,omitempty"`
- // Connection limit, -1 means no limit and -2 means the
- // database is not valid
+ // Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and
+ // `ALTER DATABASE`. How many concurrent connections can be made to
+ // this database. -1 (the default) means no limit.
// +optional
ConnectionLimit *int `json:"connectionLimit,omitempty"`
- // The default tablespace of this database
+ // Maps to the `TABLESPACE` parameter of `CREATE DATABASE`.
+ // Maps to the `SET TABLESPACE` command of `ALTER DATABASE`.
+ // The name of the tablespace (in PostgreSQL) that will be associated
+ // with the new database. This tablespace will be the default
+ // tablespace used for objects created in this database.
// +optional
Tablespace string `json:"tablespace,omitempty"`
- // The policy for end-of-life maintenance of this database
+ // The policy for end-of-life maintenance of this database.
// +kubebuilder:validation:Enum=delete;retain
// +kubebuilder:default:=retain
// +optional
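For orientation, a minimal sketch of how these fields can be populated (values are illustrative; `ptr` is `k8s.io/utils/ptr`, already used elsewhere in this series):

```go
spec := DatabaseSpec{
	Name:             "app",       // immutable; reserved names such as template1 are rejected
	Owner:            "app",
	Template:         "template0", // immutable
	Encoding:         "UTF8",      // immutable
	IsTemplate:       ptr.To(false),
	AllowConnections: ptr.To(true),
	ConnectionLimit:  ptr.To(-1), // -1 (the default) means no limit
	Tablespace:       "pg_default",
}
```

The immutable fields are enforced by the `self == oldSelf` CEL validations above, so they can only be set at creation time.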
diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml
index 7a1d7c8066..b9db5db349 100644
--- a/config/crd/bases/postgresql.cnpg.io_databases.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml
@@ -59,20 +59,22 @@ spec:
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
properties:
allowConnections:
- description: True when connections to this database are allowed
+ description: |-
+ Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and
+ `ALTER DATABASE`. If false then no one can connect to this database.
type: boolean
builtinLocale:
description: |-
- The BUILTIN_LOCALE (cannot be changed)
- Specifies the locale name when the builtin provider is used.
- This option requires `localeProvider` to be set to `builtin`.
- Available from PostgreSQL 17.
+ Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. Specifies the locale name when the
+ builtin provider is used. This option requires `localeProvider` to
+ be set to `builtin`. Available from PostgreSQL 17.
type: string
x-kubernetes-validations:
- message: builtinLocale is immutable
rule: self == oldSelf
cluster:
- description: The corresponding cluster
+ description: The name of the PostgreSQL cluster hosting the database.
properties:
name:
default: ""
@@ -86,25 +88,30 @@ spec:
type: object
x-kubernetes-map-type: atomic
collationVersion:
- description: The COLLATION_VERSION (cannot be changed)
+ description: |-
+ Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This
+ setting cannot be changed.
type: string
x-kubernetes-validations:
- message: collationVersion is immutable
rule: self == oldSelf
connectionLimit:
description: |-
- Connection limit, -1 means no limit and -2 means the
- database is not valid
+ Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and
+ `ALTER DATABASE`. How many concurrent connections can be made to
+ this database. -1 (the default) means no limit.
type: integer
databaseReclaimPolicy:
default: retain
- description: The policy for end-of-life maintenance of this database
+ description: The policy for end-of-life maintenance of this database.
enum:
- delete
- retain
type: string
encoding:
- description: The encoding (cannot be changed)
+ description: |-
+ Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Character set encoding to use in the database.
type: string
x-kubernetes-validations:
- message: encoding is immutable
@@ -112,65 +119,74 @@ spec:
ensure:
default: present
description: Ensure the PostgreSQL database is `present` or `absent`
- - defaults to "present"
+ - defaults to "present".
enum:
- present
- absent
type: string
icuLocale:
description: |-
- The ICU_LOCALE (cannot be changed)
- Specifies the ICU locale when the ICU provider is used.
- This option requires `localeProvider` to be set to `icu`.
- Available from PostgreSQL 15.
+ Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. Specifies the ICU locale when the ICU
+ provider is used. This option requires `localeProvider` to be set to
+ `icu`. Available from PostgreSQL 15.
type: string
x-kubernetes-validations:
- message: icuLocale is immutable
rule: self == oldSelf
icuRules:
description: |-
- The ICU_RULES (cannot be changed)
- Specifies additional collation rules to customize the behavior of the default collation.
- This option requires `localeProvider` to be set to `icu`.
- Available from PostgreSQL 16.
+ Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Specifies additional collation rules to customize
+ the behavior of the default collation. This option requires
+ `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
type: string
x-kubernetes-validations:
- message: icuRules is immutable
rule: self == oldSelf
isTemplate:
- description: True when the database is a template
+ description: |-
+ Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER
+ DATABASE`. If true, this database is considered a template and can
+ be cloned by any user with `CREATEDB` privileges.
type: boolean
locale:
description: |-
- The locale (cannot be changed)
- Sets the default collation order and character classification in the new database.
+ Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Sets the default collation order and character
+ classification in the new database.
type: string
x-kubernetes-validations:
- message: locale is immutable
rule: self == oldSelf
localeCType:
- description: The LC_CTYPE (cannot be changed)
+ description: |-
+ Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed.
type: string
x-kubernetes-validations:
- message: localeCType is immutable
rule: self == oldSelf
localeCollate:
- description: The LC_COLLATE (cannot be changed)
+ description: |-
+ Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed.
type: string
x-kubernetes-validations:
- message: localeCollate is immutable
rule: self == oldSelf
localeProvider:
description: |-
- The LOCALE_PROVIDER (cannot be changed)
- This option sets the locale provider for databases created in the new cluster.
- Available from PostgreSQL 16.
+ Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. This option sets the locale provider for
+ the new database. Available from PostgreSQL 16.
type: string
x-kubernetes-validations:
- message: localeProvider is immutable
rule: self == oldSelf
name:
- description: The name inside PostgreSQL
+ description: The name of the database to create inside PostgreSQL.
+ This setting cannot be changed.
type: string
x-kubernetes-validations:
- message: name is immutable
@@ -182,14 +198,24 @@ spec:
- message: the name template1 is reserved
rule: self != 'template1'
owner:
- description: The owner
+ description: |-
+ Maps to the `OWNER` parameter of `CREATE DATABASE`.
+ Maps to the `OWNER TO` command of `ALTER DATABASE`.
+ The role name of the user who owns the database inside PostgreSQL.
type: string
tablespace:
- description: The default tablespace of this database
+ description: |-
+ Maps to the `TABLESPACE` parameter of `CREATE DATABASE`.
+ Maps to the `SET TABLESPACE` command of `ALTER DATABASE`.
+ The name of the tablespace (in PostgreSQL) that will be associated
+ with the new database. This tablespace will be the default
+ tablespace used for objects created in this database.
type: string
template:
- description: The name of the template from which to create the new
- database
+ description: |-
+ Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. The name of the template from which to create
+ this database.
type: string
x-kubernetes-validations:
- message: template is immutable
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 7a5b1c193f..48b53866ef 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -2422,7 +2422,9 @@ PostgreSQL cluster from an existing storage
- [Database](#postgresql-cnpg-io-v1-Database)
-DatabaseSpec is the specification of a Postgresql Database
+DatabaseSpec is the specification of a PostgreSQL Database, built around the
+CREATE DATABASE, ALTER DATABASE, and DROP DATABASE SQL commands of
+PostgreSQL.
@@ -2432,146 +2434,162 @@ PostgreSQL cluster from an existing storage
core/v1.LocalObjectReference
- The corresponding cluster
+ The name of the PostgreSQL cluster hosting the database.
ensure
EnsureOption
- Ensure the PostgreSQL database is present or absent - defaults to "present"
+ Ensure the PostgreSQL database is present or absent - defaults to "present".
name [Required]
string
- The name inside PostgreSQL
+ The name of the database to create inside PostgreSQL. This setting cannot be changed.
owner [Required]
string
- The owner
+ Maps to the OWNER parameter of CREATE DATABASE.
+Maps to the OWNER TO command of ALTER DATABASE.
+The role name of the user who owns the database inside PostgreSQL.
template
string
- The name of the template from which to create the new database
+ Maps to the TEMPLATE parameter of CREATE DATABASE. This setting
+cannot be changed. The name of the template from which to create
+this database.
encoding
string
- The encoding (cannot be changed)
+ Maps to the ENCODING parameter of CREATE DATABASE. This setting
+cannot be changed. Character set encoding to use in the database.
locale
string
- The locale (cannot be changed)
-Sets the default collation order and character classification in the new database.
+ Maps to the LOCALE parameter of CREATE DATABASE. This setting
+cannot be changed. Sets the default collation order and character
+classification in the new database.
localeProvider
string
- The LOCALE_PROVIDER (cannot be changed)
-This option sets the locale provider for databases created in the new cluster.
-Available from PostgreSQL 16.
+ Maps to the LOCALE_PROVIDER parameter of CREATE DATABASE. This
+setting cannot be changed. This option sets the locale provider for
+the new database. Available from PostgreSQL 16.
localeCollate
string
- The LC_COLLATE (cannot be changed)
+ Maps to the LC_COLLATE parameter of CREATE DATABASE. This
+setting cannot be changed.
localeCType
string
- The LC_CTYPE (cannot be changed)
+ Maps to the LC_CTYPE parameter of CREATE DATABASE. This setting
+cannot be changed.
icuLocale
string
- The ICU_LOCALE (cannot be changed)
-Specifies the ICU locale when the ICU provider is used.
-This option requires localeProvider to be set to icu.
-Available from PostgreSQL 15.
+ Maps to the ICU_LOCALE parameter of CREATE DATABASE. This
+setting cannot be changed. Specifies the ICU locale when the ICU
+provider is used. This option requires localeProvider to be set to
+icu. Available from PostgreSQL 15.
icuRules
string
- The ICU_RULES (cannot be changed)
-Specifies additional collation rules to customize the behavior of the default collation.
-This option requires localeProvider to be set to icu.
-Available from PostgreSQL 16.
+ Maps to the ICU_RULES parameter of CREATE DATABASE. This setting
+cannot be changed. Specifies additional collation rules to customize
+the behavior of the default collation. This option requires
+localeProvider to be set to icu. Available from PostgreSQL 16.
builtinLocale
string
- The BUILTIN_LOCALE (cannot be changed)
-Specifies the locale name when the builtin provider is used.
-This option requires localeProvider to be set to builtin.
-Available from PostgreSQL 17.
+ Maps to the BUILTIN_LOCALE parameter of CREATE DATABASE. This
+setting cannot be changed. Specifies the locale name when the
+builtin provider is used. This option requires localeProvider to
+be set to builtin. Available from PostgreSQL 17.
collationVersion
string
- The COLLATION_VERSION (cannot be changed)
+ Maps to the COLLATION_VERSION parameter of CREATE DATABASE. This
+setting cannot be changed.
isTemplate
bool
- True when the database is a template
+ Maps to the IS_TEMPLATE parameter of CREATE DATABASE and ALTER DATABASE. If true, this database is considered a template and can
+be cloned by any user with CREATEDB privileges.
allowConnections
bool
- True when connections to this database are allowed
+ Maps to the ALLOW_CONNECTIONS parameter of CREATE DATABASE and
+ALTER DATABASE. If false then no one can connect to this database.
connectionLimit
int
- Connection limit, -1 means no limit and -2 means the
-database is not valid
+ Maps to the CONNECTION LIMIT clause of CREATE DATABASE and
+ALTER DATABASE. How many concurrent connections can be made to
+this database. -1 (the default) means no limit.
tablespace
string
- The default tablespace of this database
+ Maps to the TABLESPACE parameter of CREATE DATABASE.
+Maps to the SET TABLESPACE command of ALTER DATABASE.
+The name of the tablespace (in PostgreSQL) that will be associated
+with the new database. This tablespace will be the default
+tablespace used for objects created in this database.
databaseReclaimPolicy
DatabaseReclaimPolicy
- The policy for end-of-life maintenance of this database
+ The policy for end-of-life maintenance of this database.
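A hedged reading of the two reclaim policy values above, assumed by analogy with PersistentVolume reclaim policies (the constant names here are illustrative, not part of this patch):

```go
const (
	// "retain" (default): deleting the Database object leaves the
	// PostgreSQL database in place.
	DatabaseReclaimRetain DatabaseReclaimPolicy = "retain"
	// "delete": deleting the Database object also drops the
	// PostgreSQL database it manages.
	DatabaseReclaimDelete DatabaseReclaimPolicy = "delete"
)
```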
From ed2f89dc5d23f17a09da2fdf196fe31aafca5d52 Mon Sep 17 00:00:00 2001
From: Niccolò Fei
Date: Thu, 5 Dec 2024 14:27:18 +0100
Subject: [PATCH 201/836] chore: update object status to failed when referred
cluster is deleted (#6279)
Previously, when a cluster referred to by an object was deleted, the object
status was left unchanged, which could lead to confusion. This patch
sets the status to `failed`, and the message to `cluster resource has
been deleted, skipping reconciliation`.
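The status mutation is delegated to each object's `SetAsFailed` method. A hedged sketch of what that method is assumed to look like for `Database` (the tests below only check `Status.Applied` and `Status.Message`, so the real implementation may differ; `ptr` is `k8s.io/utils/ptr`):

```go
// Assumed shape of SetAsFailed for Database.
func (db *Database) SetAsFailed(err error) {
	db.Status.Applied = ptr.To(false)
	db.Status.Message = err.Error()
}
```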
Closes #6211
Closes #6172
Signed-off-by: wolfox
Signed-off-by: Armando Ruocco
Signed-off-by: Jaime Silvela
Co-authored-by: wolfox
Co-authored-by: Armando Ruocco
Co-authored-by: Jaime Silvela
---
api/v1/database_funcs.go | 5 +
api/v1/publication_funcs.go | 5 +
api/v1/subscription_funcs.go | 5 +
internal/controller/cluster_controller.go | 2 +-
internal/controller/finalizers_delete.go | 111 +++++++++++-------
internal/controller/finalizers_delete_test.go | 54 +++++++--
6 files changed, 128 insertions(+), 54 deletions(-)
diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
index 7679f3236d..198e760ce5 100644
--- a/api/v1/database_funcs.go
+++ b/api/v1/database_funcs.go
@@ -40,6 +40,11 @@ func (db *Database) SetAsReady() {
db.Status.ObservedGeneration = db.Generation
}
+// GetStatusMessage returns the status message of the database
+func (db *Database) GetStatusMessage() string {
+ return db.Status.Message
+}
+
// GetClusterRef returns the cluster reference of the database
func (db *Database) GetClusterRef() corev1.LocalObjectReference {
return db.Spec.ClusterRef
diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go
index e67255b68c..bfda3183a3 100644
--- a/api/v1/publication_funcs.go
+++ b/api/v1/publication_funcs.go
@@ -40,6 +40,11 @@ func (pub *Publication) SetAsReady() {
pub.Status.ObservedGeneration = pub.Generation
}
+// GetStatusMessage returns the status message of the publication
+func (pub *Publication) GetStatusMessage() string {
+ return pub.Status.Message
+}
+
// GetClusterRef returns the cluster reference of the publication
func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
return pub.Spec.ClusterRef
diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go
index 49a418bdae..506bf05b81 100644
--- a/api/v1/subscription_funcs.go
+++ b/api/v1/subscription_funcs.go
@@ -40,6 +40,11 @@ func (sub *Subscription) SetAsReady() {
sub.Status.ObservedGeneration = sub.Generation
}
+// GetStatusMessage returns the status message of the subscription
+func (sub *Subscription) GetStatusMessage() string {
+ return sub.Status.Message
+}
+
// GetClusterRef returns the cluster reference of the subscription
func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference {
return sub.Spec.ClusterRef
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 2cdd503e82..606ecab88c 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -160,7 +160,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
"namespace", req.Namespace,
)
}
- if err := r.deleteFinalizers(ctx, req.NamespacedName); err != nil {
+ if err := r.notifyDeletionToOwnedResources(ctx, req.NamespacedName); err != nil {
contextLogger.Error(
err,
"error while deleting finalizers of objects on the cluster",
diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go
index 0bee4928df..6af03883b1 100644
--- a/internal/controller/finalizers_delete.go
+++ b/internal/controller/finalizers_delete.go
@@ -18,10 +18,10 @@ package controller
import (
"context"
+ "errors"
"github.com/cloudnative-pg/machinery/pkg/log"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -30,78 +30,109 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
-// ClusterReferrer is an object containing a cluster reference
-type ClusterReferrer interface {
- GetClusterRef() corev1.LocalObjectReference
- client.Object
-}
+// notifyDeletionToOwnedResources notifies the managed owned resources that their cluster has been deleted
+func (r *ClusterReconciler) notifyDeletionToOwnedResources(
+ ctx context.Context,
+ namespacedName types.NamespacedName,
+) error {
+ var dbList apiv1.DatabaseList
+ if err := r.List(ctx, &dbList, client.InNamespace(namespacedName.Namespace)); err != nil {
+ return err
+ }
-// deleteFinalizers deletes object finalizers when the cluster they were in has been deleted
-func (r *ClusterReconciler) deleteFinalizers(ctx context.Context, namespacedName types.NamespacedName) error {
- if err := r.deleteFinalizersForResource(
+ if err := notifyOwnedResourceDeletion(
ctx,
+ r.Client,
namespacedName,
- &apiv1.DatabaseList{},
+ toSliceWithPointers(dbList.Items),
utils.DatabaseFinalizerName,
); err != nil {
return err
}
- if err := r.deleteFinalizersForResource(
+ var pbList apiv1.PublicationList
+ if err := r.List(ctx, &pbList, client.InNamespace(namespacedName.Namespace)); err != nil {
+ return err
+ }
+
+ if err := notifyOwnedResourceDeletion(
ctx,
+ r.Client,
namespacedName,
- &apiv1.PublicationList{},
+ toSliceWithPointers(pbList.Items),
utils.PublicationFinalizerName,
); err != nil {
return err
}
- return r.deleteFinalizersForResource(
+ var sbList apiv1.SubscriptionList
+ if err := r.List(ctx, &sbList, client.InNamespace(namespacedName.Namespace)); err != nil {
+ return err
+ }
+
+ return notifyOwnedResourceDeletion(
ctx,
+ r.Client,
namespacedName,
- &apiv1.SubscriptionList{},
+ toSliceWithPointers(sbList.Items),
utils.SubscriptionFinalizerName,
)
}
-// deleteFinalizersForResource deletes finalizers for a given resource type
-func (r *ClusterReconciler) deleteFinalizersForResource(
+// clusterOwnedResourceWithStatus is a kubernetes resource object owned by a cluster that has status
+// capabilities
+type clusterOwnedResourceWithStatus interface {
+ client.Object
+ GetClusterRef() corev1.LocalObjectReference
+ GetStatusMessage() string
+ SetAsFailed(err error)
+}
+
+func toSliceWithPointers[T any](items []T) []*T {
+ result := make([]*T, len(items))
+ for i := range items {
+ // Take the address of the slice element, not of the loop variable,
+ // so each pointer refers to a distinct item regardless of Go version.
+ result[i] = &items[i]
+ }
+ return result
+}
+
+// notifyOwnedResourceDeletion marks the matching resources as failed and removes their finalizers
+func notifyOwnedResourceDeletion[T clusterOwnedResourceWithStatus](
ctx context.Context,
+ cli client.Client,
namespacedName types.NamespacedName,
- list client.ObjectList,
+ objects []T,
finalizerName string,
) error {
contextLogger := log.FromContext(ctx)
-
- if err := r.List(ctx, list, client.InNamespace(namespacedName.Namespace)); err != nil {
- return err
- }
-
- items, err := meta.ExtractList(list)
- if err != nil {
- return err
- }
-
- for _, item := range items {
- obj, ok := item.(ClusterReferrer)
- if !ok {
+ for _, obj := range objects {
+ itemLogger := contextLogger.WithValues(
+ "resourceKind", obj.GetObjectKind().GroupVersionKind().Kind,
+ "resourceName", obj.GetName(),
+ "finalizerName", finalizerName,
+ )
+ if obj.GetClusterRef().Name != namespacedName.Name {
continue
}
- if obj.GetClusterRef().Name != namespacedName.Name {
- continue
+ const statusMessage = "cluster resource has been deleted, skipping reconciliation"
+
+ origObj := obj.DeepCopyObject().(T)
+
+ if obj.GetStatusMessage() != statusMessage {
+ obj.SetAsFailed(errors.New(statusMessage))
+ if err := cli.Status().Patch(ctx, obj, client.MergeFrom(origObj)); err != nil {
+ itemLogger.Error(err, "error while setting failed status for cluster deletion")
+ return err
+ }
}
- origObj := obj.DeepCopyObject().(ClusterReferrer)
if controllerutil.RemoveFinalizer(obj, finalizerName) {
- contextLogger.Debug("Removing finalizer from resource",
- "finalizer", finalizerName, "resource", obj.GetName())
- if err := r.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil {
- contextLogger.Error(
+ itemLogger.Debug("Removing finalizer from resource")
+ if err := cli.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil {
+ itemLogger.Error(
err,
- "error while removing finalizer from resource",
- "resource", obj.GetName(),
- "kind", obj.GetObjectKind().GroupVersionKind().Kind,
+ "while removing the finalizer",
"oldFinalizerList", origObj.GetFinalizers(),
"newFinalizerList", obj.GetFinalizers(),
)
diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go
index cc6c0d5651..fcb7b40849 100644
--- a/internal/controller/finalizers_delete_test.go
+++ b/internal/controller/finalizers_delete_test.go
@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -33,7 +34,7 @@ import (
)
// nolint: dupl
-var _ = Describe("CRD finalizers", func() {
+var _ = Describe("Test cleanup of owned objects on cluster deletion", func() {
var (
r ClusterReconciler
scheme *runtime.Scheme
@@ -51,7 +52,7 @@ var _ = Describe("CRD finalizers", func() {
}
})
- It("should delete database finalizers for databases on the cluster", func(ctx SpecContext) {
+ It("should set databases on the cluster as failed and delete their finalizers", func(ctx SpecContext) {
databaseList := &apiv1.DatabaseList{
Items: []apiv1.Database{
{
@@ -68,6 +69,10 @@ var _ = Describe("CRD finalizers", func() {
Name: "cluster",
},
},
+ Status: apiv1.DatabaseStatus{
+ Applied: ptr.To(true),
+ Message: "",
+ },
},
{
ObjectMeta: metav1.ObjectMeta{
@@ -87,9 +92,10 @@ var _ = Describe("CRD finalizers", func() {
},
}
- cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build()
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).
+ WithStatusSubresource(&databaseList.Items[0], &databaseList.Items[1]).Build()
r.Client = cli
- err := r.deleteFinalizers(ctx, namespacedName)
+ err := r.notifyDeletionToOwnedResources(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
for _, db := range databaseList.Items {
@@ -97,6 +103,8 @@ var _ = Describe("CRD finalizers", func() {
err = cli.Get(ctx, client.ObjectKeyFromObject(&db), database)
Expect(err).ToNot(HaveOccurred())
Expect(database.Finalizers).To(BeZero())
+ Expect(database.Status.Applied).To(HaveValue(BeFalse()))
+ Expect(database.Status.Message).To(ContainSubstring("cluster resource has been deleted"))
}
})
@@ -124,16 +132,18 @@ var _ = Describe("CRD finalizers", func() {
cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build()
r.Client = cli
- err := r.deleteFinalizers(ctx, namespacedName)
+ err := r.notifyDeletionToOwnedResources(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
database := &apiv1.Database{}
err = cli.Get(ctx, client.ObjectKeyFromObject(&databaseList.Items[0]), database)
Expect(err).ToNot(HaveOccurred())
Expect(database.Finalizers).To(BeEquivalentTo([]string{utils.DatabaseFinalizerName}))
+ Expect(database.Status.Applied).To(BeNil())
+ Expect(database.Status.Message).ToNot(ContainSubstring("not reconciled"))
})
- It("should delete publication finalizers for publications on the cluster", func(ctx SpecContext) {
+ It("should set publications on the cluster as failed and delete their finalizers", func(ctx SpecContext) {
publicationList := &apiv1.PublicationList{
Items: []apiv1.Publication{
{
@@ -150,6 +160,10 @@ var _ = Describe("CRD finalizers", func() {
Name: "cluster",
},
},
+ Status: apiv1.PublicationStatus{
+ Applied: ptr.To(true),
+ Message: "",
+ },
},
{
ObjectMeta: metav1.ObjectMeta{
@@ -169,9 +183,10 @@ var _ = Describe("CRD finalizers", func() {
},
}
- cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build()
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).
+ WithStatusSubresource(&publicationList.Items[0], &publicationList.Items[1]).Build()
r.Client = cli
- err := r.deleteFinalizers(ctx, namespacedName)
+ err := r.notifyDeletionToOwnedResources(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
for _, pub := range publicationList.Items {
@@ -179,6 +194,8 @@ var _ = Describe("CRD finalizers", func() {
err = cli.Get(ctx, client.ObjectKeyFromObject(&pub), publication)
Expect(err).ToNot(HaveOccurred())
Expect(publication.Finalizers).To(BeZero())
+ Expect(publication.Status.Applied).To(HaveValue(BeFalse()))
+ Expect(publication.Status.Message).To(ContainSubstring("cluster resource has been deleted"))
}
})
@@ -205,16 +222,18 @@ var _ = Describe("CRD finalizers", func() {
cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build()
r.Client = cli
- err := r.deleteFinalizers(ctx, namespacedName)
+ err := r.notifyDeletionToOwnedResources(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
publication := &apiv1.Publication{}
err = cli.Get(ctx, client.ObjectKeyFromObject(&publicationList.Items[0]), publication)
Expect(err).ToNot(HaveOccurred())
Expect(publication.Finalizers).To(BeEquivalentTo([]string{utils.PublicationFinalizerName}))
+ Expect(publication.Status.Applied).To(BeNil())
+ Expect(publication.Status.Message).ToNot(ContainSubstring("not reconciled"))
})
- It("should delete subscription finalizers for subscriptions on the cluster", func(ctx SpecContext) {
+ It("should set subscriptions on the cluster as failed and delete their finalizers ", func(ctx SpecContext) {
subscriptionList := &apiv1.SubscriptionList{
Items: []apiv1.Subscription{
{
@@ -231,6 +250,10 @@ var _ = Describe("CRD finalizers", func() {
Name: "cluster",
},
},
+ Status: apiv1.SubscriptionStatus{
+ Applied: ptr.To(true),
+ Message: "",
+ },
},
{
ObjectMeta: metav1.ObjectMeta{
@@ -250,9 +273,10 @@ var _ = Describe("CRD finalizers", func() {
},
}
- cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build()
+ cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).
+ WithStatusSubresource(&subscriptionList.Items[0], &subscriptionList.Items[1]).Build()
r.Client = cli
- err := r.deleteFinalizers(ctx, namespacedName)
+ err := r.notifyDeletionToOwnedResources(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
for _, sub := range subscriptionList.Items {
@@ -260,6 +284,8 @@ var _ = Describe("CRD finalizers", func() {
err = cli.Get(ctx, client.ObjectKeyFromObject(&sub), subscription)
Expect(err).ToNot(HaveOccurred())
Expect(subscription.Finalizers).To(BeZero())
+ Expect(subscription.Status.Applied).To(HaveValue(BeFalse()))
+ Expect(subscription.Status.Message).To(ContainSubstring("cluster resource has been deleted"))
}
})
@@ -286,12 +312,14 @@ var _ = Describe("CRD finalizers", func() {
cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build()
r.Client = cli
- err := r.deleteFinalizers(ctx, namespacedName)
+ err := r.notifyDeletionToOwnedResources(ctx, namespacedName)
Expect(err).ToNot(HaveOccurred())
subscription := &apiv1.Subscription{}
err = cli.Get(ctx, client.ObjectKeyFromObject(&subscriptionList.Items[0]), subscription)
Expect(err).ToNot(HaveOccurred())
Expect(subscription.Finalizers).To(BeEquivalentTo([]string{utils.SubscriptionFinalizerName}))
+ Expect(subscription.Status.Applied).To(BeNil())
+ Expect(subscription.Status.Message).ToNot(ContainSubstring("not reconciled"))
})
})
From e2967abe6d86ba11425b2ecc38336be339990ea8 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 5 Dec 2024 15:04:26 +0100
Subject: [PATCH 202/836] fix(plugin): pass context in `psql` command (#6257)
We were not passing the context from the `kubectl cnpg` plugin call
to the `kubectl` command executed when invoking `psql`.
This patch resolves the issue by introducing a new parameter that
captures the Kubernetes context used to call the plugin.
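A self-contained sketch of the ordering logic introduced in `getKubectlInvocation` below (`kind-dev` is a hypothetical kubeconfig context, and `sketchInvocation` is an illustrative stand-in, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// sketchInvocation mirrors the fix: the --context flag is appended
// right after "kubectl exec", and only when a context was given.
func sketchInvocation(kubeContext string, rest ...string) string {
	args := []string{"kubectl", "exec"}
	if kubeContext != "" {
		args = append(args, "--context", kubeContext)
	}
	args = append(args, rest...)
	return strings.Join(args, " ")
}

func main() {
	fmt.Println(sketchInvocation("kind-dev", "-t", "cluster-example-1", "--", "psql"))
	// Output: kubectl exec --context kind-dev -t cluster-example-1 -- psql
}
```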
Closes #6227
Closes #4332
Signed-off-by: Jonathan Gonzalez V.
---
internal/cmd/plugin/plugin.go | 5 +++++
internal/cmd/plugin/psql/cmd.go | 1 +
internal/cmd/plugin/psql/psql.go | 7 +++++++
3 files changed, 13 insertions(+)
diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index b9af618b84..b22c1a9d6c 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -43,6 +43,9 @@ var (
// Namespace to operate in
Namespace string
+ // KubeContext to operate with
+ KubeContext string
+
// NamespaceExplicitlyPassed indicates if the namespace was passed manually
NamespaceExplicitlyPassed bool
@@ -96,6 +99,8 @@ func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error {
return err
}
+ KubeContext = *configFlags.Context
+
ClientInterface = kubernetes.NewForConfigOrDie(Config)
return utils.DetectSecurityContextConstraints(ClientInterface.Discovery())
diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go
index 6a2bfb6cf1..514c39a853 100644
--- a/internal/cmd/plugin/psql/cmd.go
+++ b/internal/cmd/plugin/psql/cmd.go
@@ -45,6 +45,7 @@ func NewCmd() *cobra.Command {
psqlOptions := CommandOptions{
Replica: replica,
Namespace: plugin.Namespace,
+ Context: plugin.KubeContext,
AllocateTTY: allocateTTY,
PassStdin: passStdin,
Args: psqlArgs,
diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go
index f20d5ce727..35d43c16ff 100644
--- a/internal/cmd/plugin/psql/psql.go
+++ b/internal/cmd/plugin/psql/psql.go
@@ -59,6 +59,9 @@ type CommandOptions struct {
// The Namespace where we're working in
Namespace string
+ // The Kubernetes context in which to execute the command
+ Context string
+
// Whether we should allocate a TTY for psql
AllocateTTY bool
@@ -106,6 +109,10 @@ func (psql *Command) getKubectlInvocation() ([]string, error) {
result := make([]string, 0, 13+len(psql.Args))
result = append(result, "kubectl", "exec")
+ if psql.Context != "" {
+ result = append(result, "--context", psql.Context)
+ }
+
if psql.AllocateTTY {
result = append(result, "-t")
}
From ddb8b36ee33b180c3b67b9198161a1b804c1276f Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 5 Dec 2024 17:05:34 +0100
Subject: [PATCH 203/836] fix(bootstrap): clean up the WAL volume before
initializing a cluster (#6265)
This patch ensures that the WAL volume is cleaned up during bootstrap,
just as the data directory volume is.
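Condensed, the safety check now covers both volumes with three possible outcomes. A hedged Go sketch of the decision tree implemented below (the booleans stand in for the filesystem and `pg_controldata` probes):

```go
// decide summarizes the three outcomes of EnsureTargetDirectoriesDoNotExist.
func decide(pgDataExists, pgWalExists, controlDataValid bool) string {
	switch {
	case !pgDataExists && !pgWalExists:
		return "nothing to do"
	case controlDataValid:
		return "rename data and WAL directories with a timestamp suffix"
	default:
		return "remove the leftovers of a failed initialization"
	}
}
```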
Closes #6264
Signed-off-by: Marco Nenciarini
Signed-off-by: Leonardo Cecchi
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Co-authored-by: Leonardo Cecchi
Co-authored-by: Jaime Silvela
Co-authored-by: Armando Ruocco
---
internal/cmd/manager/instance/initdb/cmd.go | 2 +-
internal/cmd/manager/instance/join/cmd.go | 2 +-
.../cmd/manager/instance/restore/restore.go | 2 +-
pkg/management/postgres/initdb.go | 83 ++++++++----
pkg/management/postgres/initdb_test.go | 124 ++++++++++++++++++
5 files changed, 187 insertions(+), 26 deletions(-)
create mode 100644 pkg/management/postgres/initdb_test.go
diff --git a/internal/cmd/manager/instance/initdb/cmd.go b/internal/cmd/manager/instance/initdb/cmd.go
index 81c453544d..09928c9a90 100644
--- a/internal/cmd/manager/instance/initdb/cmd.go
+++ b/internal/cmd/manager/instance/initdb/cmd.go
@@ -150,7 +150,7 @@ func NewCmd() *cobra.Command {
func initSubCommand(ctx context.Context, info postgres.InitInfo) error {
contextLogger := log.FromContext(ctx)
- err := info.CheckTargetDataDirectory(ctx)
+ err := info.EnsureTargetDirectoriesDoNotExist(ctx)
if err != nil {
return err
}
diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go
index e708d79a2d..d29c59f09e 100644
--- a/internal/cmd/manager/instance/join/cmd.go
+++ b/internal/cmd/manager/instance/join/cmd.go
@@ -95,7 +95,7 @@ func NewCmd() *cobra.Command {
func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postgres.InitInfo) error {
contextLogger := log.FromContext(ctx)
- if err := info.CheckTargetDataDirectory(ctx); err != nil {
+ if err := info.EnsureTargetDirectoriesDoNotExist(ctx); err != nil {
return err
}
diff --git a/internal/cmd/manager/instance/restore/restore.go b/internal/cmd/manager/instance/restore/restore.go
index 7c05ea097b..5f2b81974f 100644
--- a/internal/cmd/manager/instance/restore/restore.go
+++ b/internal/cmd/manager/instance/restore/restore.go
@@ -68,7 +68,7 @@ func (r *restoreRunnable) Start(ctx context.Context) error {
func restoreSubCommand(ctx context.Context, info postgres.InitInfo, cli client.Client) error {
contextLogger := log.FromContext(ctx)
- if err := info.CheckTargetDataDirectory(ctx); err != nil {
+ if err := info.EnsureTargetDirectoriesDoNotExist(ctx); err != nil {
return err
}
diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go
index e78d434dca..4f0b70d364 100644
--- a/pkg/management/postgres/initdb.go
+++ b/pkg/management/postgres/initdb.go
@@ -112,11 +112,11 @@ type InitInfo struct {
TablespaceMapFile []byte
}
-// CheckTargetDataDirectory ensures that the target data directory does not exist.
-// This is a safety check we do before initializing a new instance data directory.
+// EnsureTargetDirectoriesDoNotExist ensures that the target data and WAL directories do not exist.
+// This is a safety check we do before initializing a new instance.
//
// If the PGDATA directory already exists and contains a valid PostgreSQL control file,
-// the function moves its contents to a uniquely named directory.
+// the function moves the contents to uniquely named directories.
// If no valid control file is found, the function assumes the directory is the result of
// a failed initialization attempt and removes it.
//
@@ -132,47 +132,84 @@ type InitInfo struct {
// important user data. This is particularly relevant when using static provisioning
// of PersistentVolumeClaims (PVCs), as it prevents accidental overwriting of a valid
// data directory that may exist in the PersistentVolumes (PVs).
-func (info InitInfo) CheckTargetDataDirectory(ctx context.Context) error {
+func (info InitInfo) EnsureTargetDirectoriesDoNotExist(ctx context.Context) error {
contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData)
pgDataExists, err := fileutils.FileExists(info.PgData)
if err != nil {
- contextLogger.Error(err, "Error while checking for an existing PGData")
- return fmt.Errorf("while verifying is PGDATA exists: %w", err)
+ contextLogger.Error(err, "Error while checking for an existing data directory")
+ return fmt.Errorf("while verifying if the data directory exists: %w", err)
}
- if !pgDataExists {
- // The PGDATA directory doesn't exist. We can definitely
- // write to it
+
+ pgWalExists := false
+ if info.PgWal != "" {
+ if pgWalExists, err = fileutils.FileExists(info.PgWal); err != nil {
+ contextLogger.Error(err, "Error while checking for an existing WAL directory")
+ return fmt.Errorf("while verifying if the WAL directory exists: %w", err)
+ }
+ }
+
+ if !pgDataExists && !pgWalExists {
return nil
}
- // We've an existing directory. Let's check if this is a real
- // PGDATA directory or not.
out, err := info.GetInstance().GetPgControldata()
- if err != nil {
- contextLogger.Info("pg_controldata check on existing directory failed, cleaning it up",
- "out", out, "err", err)
+ if err == nil {
+ contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folders", "out", out)
+ return info.renameExistingTargetDataDirectories(ctx, pgWalExists)
+ }
+
+ contextLogger.Info("pg_controldata check on existing directory failed, cleaning up folders", "err", err, "out", out)
+ return info.removeExistingTargetDataDirectories(ctx, pgDataExists, pgWalExists)
+}
+
+func (info InitInfo) removeExistingTargetDataDirectories(ctx context.Context, pgDataExists, pgWalExists bool) error {
+ contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData, "pgwal", info.PgWal)
+
+ if pgDataExists {
+ contextLogger.Info("cleaning up existing data directory")
if err := fileutils.RemoveDirectory(info.PgData); err != nil {
contextLogger.Error(err, "error while cleaning up existing data directory")
return err
}
+ }
- return nil
+ if pgWalExists {
+ contextLogger.Info("cleaning up existing WAL directory")
+ if err := fileutils.RemoveDirectory(info.PgWal); err != nil {
+ contextLogger.Error(err, "error while cleaning up existing WAL directory")
+ return err
+ }
}
- renamedDirectoryName := fmt.Sprintf("%s_%s", info.PgData, fileutils.FormatFriendlyTimestamp(time.Now()))
- contextLogger = contextLogger.WithValues(
- "out", out,
- "newName", renamedDirectoryName,
- )
+ return nil
+}
+
+func (info InitInfo) renameExistingTargetDataDirectories(ctx context.Context, pgWalExists bool) error {
+ contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData, "pgwal", info.PgWal)
- contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folder")
- if err := os.Rename(info.PgData, renamedDirectoryName); err != nil {
- contextLogger.Error(err, "error while renaming existing data directory")
+ suffixTimestamp := fileutils.FormatFriendlyTimestamp(time.Now())
+
+ pgdataNewName := fmt.Sprintf("%s_%s", info.PgData, suffixTimestamp)
+
+ contextLogger.Info("renaming the data directory", "pgdataNewName", pgdataNewName)
+ if err := os.Rename(info.PgData, pgdataNewName); err != nil {
+ contextLogger.Error(err, "error while renaming existing data directory",
+ "pgdataNewName", pgdataNewName)
return fmt.Errorf("while renaming existing data directory: %w", err)
}
+ if pgWalExists {
+ pgwalNewName := fmt.Sprintf("%s_%s", info.PgWal, suffixTimestamp)
+
+ contextLogger.Info("renaming the WAL directory", "pgwalNewName", pgwalNewName)
+ if err := os.Rename(info.PgWal, pgwalNewName); err != nil {
+ contextLogger.Error(err, "error while renaming existing WAL directory")
+ return fmt.Errorf("while renaming existing WAL directory: %w", err)
+ }
+ }
+
return nil
}
diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go
new file mode 100644
index 0000000000..985c42293b
--- /dev/null
+++ b/pkg/management/postgres/initdb_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postgres
+
+import (
+ "os"
+ "path/filepath"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("EnsureTargetDirectoriesDoNotExist", func() {
+ var initInfo InitInfo
+
+ BeforeEach(func() {
+ initInfo = InitInfo{
+ PgData: GinkgoT().TempDir(),
+ PgWal: GinkgoT().TempDir(),
+ }
+ Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed())
+ Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed())
+ })
+
+ It("should do nothing if both data and WAL directories do not exist", func(ctx SpecContext) {
+ Expect(os.RemoveAll(initInfo.PgData)).Should(Succeed())
+ Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed())
+
+ err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist))
+ Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist))
+ })
+
+ It("should remove existing directories if pg_controldata check fails", func(ctx SpecContext) {
+ err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist))
+ Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist))
+ })
+
+ It("should remove data directory even if WAL directory is not present", func(ctx SpecContext) {
+ Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed())
+
+ err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist))
+ Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist))
+ })
+
+ It("should remove WAL directory even if data directory is not present", func(ctx SpecContext) {
+ Expect(os.RemoveAll(initInfo.PgData)).Should(Succeed())
+
+ err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist))
+ Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist))
+ })
+})
+
+var _ = Describe("renameExistingTargetDataDirectories", func() {
+ var initInfo InitInfo
+
+ BeforeEach(func() {
+ initInfo = InitInfo{
+ PgData: GinkgoT().TempDir(),
+ PgWal: GinkgoT().TempDir(),
+ }
+ Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed())
+ Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed())
+ })
+
+ It("should rename existing data and WAL directories", func(ctx SpecContext) {
+ err := initInfo.renameExistingTargetDataDirectories(ctx, true)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist))
+ Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist))
+
+ filelist, err := filepath.Glob(initInfo.PgData + "_*")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(filelist).To(HaveLen(1))
+
+ filelist, err = filepath.Glob(initInfo.PgWal + "_*")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(filelist).To(HaveLen(1))
+ })
+
+ It("should rename existing data without WAL directories", func(ctx SpecContext) {
+ Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed())
+
+ err := initInfo.renameExistingTargetDataDirectories(ctx, false)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist))
+ Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist))
+
+ filelist, err := filepath.Glob(initInfo.PgData + "_*")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(filelist).To(HaveLen(1))
+
+ filelist, err = filepath.Glob(initInfo.PgWal + "_*")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(filelist).To(BeEmpty())
+ })
+})
From e2f640fe83b56a6f65b26a1f4c7b22358000c296 Mon Sep 17 00:00:00 2001
From: "David E. Wheeler" <46604+theory@users.noreply.github.com>
Date: Thu, 5 Dec 2024 13:30:49 -0800
Subject: [PATCH 204/836] docs: define RPO and RTO (#6239)
Signed-off-by: David E. Wheeler
---
.wordlist-en-custom.txt | 2 ++
docs/src/architecture.md | 11 ++++++-----
docs/src/backup.md | 11 ++++++-----
docs/src/before_you_start.md | 7 +++++++
docs/src/failover.md | 3 ++-
docs/src/faq.md | 4 ++--
docs/src/instance_manager.md | 18 ++++++++++--------
docs/src/operator_capability_levels.md | 20 +++++++++++---------
docs/src/replication.md | 14 ++++++++------
docs/src/rolling_update.md | 5 +++--
docs/src/wal_archiving.md | 2 +-
11 files changed, 58 insertions(+), 39 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index f5fe08815f..9216cd19d7 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -361,7 +361,9 @@ README
RHSA
RLS
RPO
+rpo
RTO
+rto
RUNTIME
ReadWriteOnce
RedHat
diff --git a/docs/src/architecture.md b/docs/src/architecture.md
index 326f50fdd1..509461e26d 100644
--- a/docs/src/architecture.md
+++ b/docs/src/architecture.md
@@ -354,11 +354,12 @@ only write inside a single Kubernetes cluster, at any time.
However, for business continuity objectives it is fundamental to:
-- reduce global **recovery point objectives** (RPO) by storing PostgreSQL backup data
- in multiple locations, regions and possibly using different providers
- (Disaster Recovery)
-- reduce global **recovery time objectives** (RTO) by taking advantage of PostgreSQL
- replication beyond the primary Kubernetes cluster (High Availability)
+- reduce global **recovery point objectives** ([RPO](before_you_start.md#rpo))
+ by storing PostgreSQL backup data in multiple locations, regions and possibly
+ using different providers (Disaster Recovery)
+- reduce global **recovery time objectives** ([RTO](before_you_start.md#rto))
+ by taking advantage of PostgreSQL replication beyond the primary Kubernetes
+ cluster (High Availability)
In order to address the above concerns, CloudNativePG introduces the concept of
a PostgreSQL Topology that is distributed across different Kubernetes clusters
diff --git a/docs/src/backup.md b/docs/src/backup.md
index fac42c56f9..4c3f8cb172 100644
--- a/docs/src/backup.md
+++ b/docs/src/backup.md
@@ -66,7 +66,8 @@ as they can simply rely on the WAL archive to synchronize across long
distances, extending disaster recovery goals across different regions.
When you [configure a WAL archive](wal_archiving.md), CloudNativePG provides
-out-of-the-box an RPO <= 5 minutes for disaster recovery, even across regions.
+out-of-the-box an [RPO](before_you_start.md#rpo) <= 5 minutes for disaster
+recovery, even across regions.
!!! Important
Our recommendation is to always set up the WAL archive in production.
@@ -118,9 +119,9 @@ including:
- availability of a trusted storage class that supports volume snapshots
- size of the database: with object stores, the larger your database, the
longer backup and, most importantly, recovery procedures take (the latter
- impacts RTO); in presence of Very Large Databases (VLDB), the general
- advice is to rely on Volume Snapshots as, thanks to copy-on-write, they
- provide faster recovery
+ impacts [RTO](before_you_start.md#rto)); in the presence of Very Large Databases
+ (VLDB), the general advice is to rely on Volume Snapshots as, thanks to
+ copy-on-write, they provide faster recovery
- data mobility and possibility to store or relay backup files on a
secondary location in a different region, or any subsequent one
- other factors, mostly based on the confidence and familiarity with the
@@ -188,7 +189,7 @@ In Kubernetes CronJobs, the equivalent expression is `0 0 * * *` because seconds
are not included.
!!! Hint
- Backup frequency might impact your recovery time object (RTO) after a
+ Backup frequency might impact your recovery time objective ([RTO](before_you_start.md#rto)) after a
disaster which requires a full or Point-In-Time recovery operation. Our
advice is that you regularly test your backups by recovering them, and then
measuring the time it takes to recover from scratch so that you can refine
diff --git a/docs/src/before_you_start.md b/docs/src/before_you_start.md
index 2d6c0377fe..7ebc61d732 100644
--- a/docs/src/before_you_start.md
+++ b/docs/src/before_you_start.md
@@ -131,6 +131,13 @@ PVC group
belonging to the same PostgreSQL instance, namely the main volume containing
the PGDATA (`storage`) and the volume for WALs (`walStorage`).
+RTO
+: Acronym for "recovery time objective", the amount of time a system can be
+ unavailable without adversely impacting the application.
+
+RPO
+: Acronym for "recovery point objective", a calculation of the level of
+ acceptable data loss following a disaster recovery scenario.
## Cloud terminology
diff --git a/docs/src/failover.md b/docs/src/failover.md
index 8469747104..89ecac7d3f 100644
--- a/docs/src/failover.md
+++ b/docs/src/failover.md
@@ -46,7 +46,8 @@ During the time the failing primary is being shut down:
## RTO and RPO impact
-Failover may result in the service being impacted and/or data being lost:
+Failover may result in the service being impacted ([RTO](before_you_start.md#rto))
+and/or data being lost ([RPO](before_you_start.md#rpo)):
1. During the time when the primary has started to fail, and before the controller
starts failover procedures, queries in transit, WAL writes, checkpoints and
diff --git a/docs/src/faq.md b/docs/src/faq.md
index 19137d47c6..f8f92e2253 100644
--- a/docs/src/faq.md
+++ b/docs/src/faq.md
@@ -451,8 +451,8 @@ single cluster, namely:
- storage: use dedicated storage for each worker node running Postgres
Use at least one standby, preferably at least two, so that you can configure
-synchronous replication in the cluster, introducing RPO=0 for high
-availability.
+synchronous replication in the cluster, introducing [RPO](before_you_start.md#rpo)=0
+for high availability.
If you do not have availability zones - normally the case of on-premise
installations - separate on worker nodes and storage.
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index ce13adbab2..53d13c4e4d 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -5,7 +5,7 @@ It simply relies on the Kubernetes API server and a native key component called:
the **Postgres instance manager**.
The instance manager takes care of the entire lifecycle of the PostgreSQL
-leading process (also known as `postmaster`).
+server process (also known as `postmaster`).
When you create a new cluster, the operator makes a Pod per instance.
The field `.spec.instances` specifies how many instances to create.
@@ -182,8 +182,9 @@ seconds.
!!! Important
In order to avoid any data loss in the Postgres cluster, which impacts
- the database RPO, don't delete the Pod where the primary instance is running.
- In this case, perform a switchover to another instance first.
+ the database [RPO](before_you_start.md#rpo), don't delete the Pod where
+ the primary instance is running. In this case, perform a switchover to
+ another instance first.
### Shutdown of the primary during a switchover
@@ -197,11 +198,12 @@ the time given to the former primary to shut down gracefully and archive all
the WAL files. By default it is set to `3600` (1 hour).
!!! Warning
- The `.spec.switchoverDelay` option affects the RPO and RTO of your
- PostgreSQL database. Setting it to a low value, might favor RTO over RPO
- but lead to data loss at cluster level and/or backup level. On the contrary,
- setting it to a high value, might remove the risk of data loss while leaving
- the cluster without an active primary for a longer time during the switchover.
+ The `.spec.switchoverDelay` option affects the [RPO](before_you_start.md#rpo)
+ and [RTO](before_you_start.md#rto) of your PostgreSQL database. Setting it to
+ a low value might favor RTO over RPO but lead to data loss at cluster level
+ and/or backup level. On the contrary, setting it to a high value might remove
+ the risk of data loss while leaving the cluster without an active primary for a
+ longer time during the switchover.
## Failover
diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md
index c961a27154..3975036803 100644
--- a/docs/src/operator_capability_levels.md
+++ b/docs/src/operator_capability_levels.md
@@ -336,11 +336,12 @@ continuity and scalability.
*Disaster recovery* is a business continuity component that requires
that both backup and recovery of a database work correctly. While as a
-starting point, the goal is to achieve RPO < 5 minutes, the long-term goal is
-to implement RPO=0 backup solutions. *High availability* is the other
-important component of business continuity. Through PostgreSQL native
-physical replication and hot standby replicas, it allows the operator to perform
-failover and switchover operations. This area includes enhancements in:
+starting point, the goal is to achieve [RPO](before_you_start.md#rpo) < 5
+minutes, the long-term goal is to implement RPO=0 backup solutions. *High
+availability* is the other important component of business continuity. Through
+PostgreSQL native physical replication and hot standby replicas, it allows the
+operator to perform failover and switchover operations. This area includes
+enhancements in:
- Control of PostgreSQL physical replication, such as synchronous replication,
(cascading) replication clusters, and so on
@@ -404,8 +405,9 @@ database snapshots with cold backups.
### Backups from a standby
The operator supports offloading base backups onto a standby without impacting
-the RPO of the database. This allows resources to be preserved on the primary, in
-particular I/O, for standard database operations.
+the [RPO](before_you_start.md#rpo) of the database. This allows resources to
+be preserved on the primary, in particular I/O, for standard database
+operations.
### Full restore from a backup
@@ -460,8 +462,8 @@ switchover across data centers remains necessary.)
Additionally, the flexibility extends to creating delayed replica clusters
intentionally lagging behind the primary cluster. This intentional lag aims to
-minimize the Recovery Time Objective (RTO) in the event of unintended errors,
-such as incorrect `DELETE` or `UPDATE` SQL operations.
+minimize the Recovery Time Objective ([RTO](before_you_start.md#rto)) in the
+event of unintended errors, such as incorrect `DELETE` or `UPDATE` SQL operations.
### Distributed Database Topologies
diff --git a/docs/src/replication.md b/docs/src/replication.md
index ff9f0f1f41..fbc37595bb 100644
--- a/docs/src/replication.md
+++ b/docs/src/replication.md
@@ -36,11 +36,12 @@ recovery.
PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through
*hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the
-transaction level, supporting RPO=0 clusters. Cascading replication was added in
-PostgreSQL 9.2 (2012). The foundations for [logical replication](logical_replication.md)
-were established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced
-native support for the publisher/subscriber pattern to replicate data from an
-origin to a destination. The table below summarizes these milestones.
+transaction level, supporting [RPO](before_you_start.md#rpo)=0 clusters. Cascading
+replication was added in PostgreSQL 9.2 (2012). The foundations for
+[logical replication](logical_replication.md) were established in PostgreSQL
+9.4 (2014), and version 10 (2017) introduced native support for the
+publisher/subscriber pattern to replicate data from an origin to a destination. The
+table below summarizes these milestones.
| Version | Year | Feature |
|:-------:|:----:|-----------------------------------------------------------------------|
@@ -528,7 +529,8 @@ availability zone from the primary instance, usually identified by
the `topology.kubernetes.io/zone`
[label on a node](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone).
This would increase the robustness of the cluster in case of an outage in a
-single availability zone, especially in terms of recovery point objective (RPO).
+single availability zone, especially in terms of recovery point objective
+([RPO](before_you_start.md#rpo)).
The idea of anti-affinity is to ensure that sync replicas that participate in
the quorum are chosen from pods running on nodes that have different values for
diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md
index 01907ab416..023cc78c11 100644
--- a/docs/src/rolling_update.md
+++ b/docs/src/rolling_update.md
@@ -57,8 +57,9 @@ The `primaryUpdateMethod` option accepts one of the following values:
There's no one-size-fits-all configuration for the update method, as that
depends on several factors like the actual workload of your database, the
-requirements in terms of RPO and RTO, whether your PostgreSQL architecture is
-shared or shared nothing, and so on.
+requirements in terms of [RPO](before_you_start.md#rpo) and
+[RTO](before_you_start.md#rto), whether your PostgreSQL architecture is shared
+or shared nothing, and so on.
Indeed, being PostgreSQL a primary/standby architecture database management
system, the update process inevitably generates a downtime for your
diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md
index 1f7b60e0c7..5216f96a53 100644
--- a/docs/src/wal_archiving.md
+++ b/docs/src/wal_archiving.md
@@ -43,7 +43,7 @@ segment to be archived.
By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring
that WAL files, even in case of low workloads, are closed and archived
at least every 5 minutes, providing a deterministic time-based value for
- your Recovery Point Objective (RPO). Even though you change the value
+ your Recovery Point Objective ([RPO](before_you_start.md#rpo)). Even though you can change the value
of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
our experience suggests that the default value set by the operator is
suitable for most use cases.
From d3f2b60afb0b68490ea98437d38ad063125d00af Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Thu, 5 Dec 2024 22:32:12 +0100
Subject: [PATCH 205/836] docs: explain logical replication and database
examples (#6282)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Closes #6281
Signed-off-by: Jaime Silvela
Signed-off-by: Niccolò Fei
Co-authored-by: Niccolò Fei
---
docs/src/samples.md | 67 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 67 insertions(+)
diff --git a/docs/src/samples.md b/docs/src/samples.md
index 38a93256cc..a6ef595d7c 100644
--- a/docs/src/samples.md
+++ b/docs/src/samples.md
@@ -135,3 +135,70 @@ For a list of available options, see [API reference](cloudnative-pg.v1.md).
**Pooler with custom service config**
: [`pooler-external.yaml`](samples/pooler-external.yaml)
+
+## Logical replication via declarative Publication and Subscription objects
+
+Two test manifests contain everything needed to set up logical replication:
+
+**Source cluster with a publication**
+: [`cluster-example-logical-source.yaml`](samples/cluster-example-logical-source.yaml)
+
+Sets up a cluster, `cluster-example`, with some tables created in the `app`
+database, and, importantly, *grants the `replication` privilege to the `app`
+user*.
+A publication is created for the cluster on the `app` database: note that the
+publication will be reconciled only after the cluster's primary is up and
+running.
+
+**Destination cluster with a subscription**
+: *Prerequisites*: The source cluster with publication, defined as above.
+: [`cluster-example-logical-destination.yaml`](samples/cluster-example-logical-destination.yaml)
+
+Sets up a cluster `cluster-example-dest` with:
+
+- the source cluster defined in the `externalClusters` stanza. Note that it uses
+ the `app` role to connect, which assumes the source cluster grants it
+ `replication` privilege.
+- a bootstrap import of microservice type, with `schemaOnly` enabled
+
+A subscription is created on the destination cluster: note that the subscription
+will be reconciled only after the destination cluster's primary is up and
+running.
+
+After both clusters have been reconciled, together with the publication and
+subscription objects, you can verify that the tables in the source cluster,
+and the data in them, have been replicated in the destination cluster.
+
+In addition, there are some standalone example manifests:
+
+**A plain Publication targeting All Tables**
+: *Prerequisites*: an existing cluster `cluster-example`.
+: [`publication-example.yaml`](samples/publication-example.yaml)
+
+**A Publication with a constrained publication target**
+: *Prerequisites*: an existing cluster `cluster-example`.
+: [`publication-example-objects.yaml`](samples/publication-example-objects.yaml)
+
+**A plain Subscription**
+: *Prerequisites*: an existing cluster `cluster-example` set up as source, with
+ a publication `pub-all`. A cluster `cluster-example-dest` set up as a
+ destination cluster, including the `externalClusters` stanza with
+ connection parameters to the source cluster, including a role with
+ replication privilege.
+: [`subscription-example.yaml`](samples/subscription-example.yaml)
+
+All the above manifests create publications or subscriptions on the `app`
+database. The Database CRD offers a convenient way to create databases
+declaratively. With it, logical replication can be set up for arbitrary
+databases, which brings us to the next section.
+
+## Declarative management of Postgres databases
+
+**A plain Database**
+: *Prerequisites*: an existing cluster `cluster-example`.
+: [`database-example.yaml`](samples/database-example.yaml)
+
+**A Database with ICU locale specifications**
+: *Prerequisites*: an existing cluster `cluster-example` running Postgres 16
+ or later.
+: [`database-example-icu.yaml`](samples/database-example-icu.yaml)
From 186f28b19fdeffeb20a4e428eb5ca683b200a7e1 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Thu, 5 Dec 2024 22:35:50 +0100
Subject: [PATCH 206/836] feat: add support for `maxConcurrentReconciles`
(#5678)
Support the `maxConcurrentReconciles` parameter for improved concurrency
management, except for backup operations. By default, the value is set
to 10, enabling the operator to handle larger deployments efficiently
out of the box.
This enhancement provides greater flexibility for tuning reconciliation
behaviour to suit diverse workloads and deployment sizes.
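
As a hedged sketch (not the literal patch code), the snippet below shows
how a flag value like this one reaches controller-runtime; the
PoolerReconciler stub stands in for any of the reconcilers touched by
this patch:

    package controller

    import (
        "context"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/controller"

        apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    )

    // PoolerReconciler is a stub standing in for the real reconciler.
    type PoolerReconciler struct{}

    func (r *PoolerReconciler) Reconcile(_ context.Context, _ ctrl.Request) (ctrl.Result, error) {
        return ctrl.Result{}, nil
    }

    // SetupWithManager registers the controller. Without WithOptions,
    // controller-runtime reconciles one object at a time per controller;
    // MaxConcurrentReconciles raises that ceiling.
    func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int) error {
        return ctrl.NewControllerManagedBy(mgr).
            WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
            For(&apiv1.Pooler{}).
            Complete(r)
    }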
Closes #5687
Signed-off-by: Armando Ruocco
---
config/manager/manager.yaml | 1 +
internal/cmd/manager/controller/cmd.go | 8 ++++++++
internal/cmd/manager/controller/controller.go | 13 ++++++-------
internal/controller/cluster_controller.go | 6 +++++-
internal/controller/plugin_controller.go | 8 +++++++-
internal/controller/pooler_controller.go | 4 +++-
internal/controller/scheduledbackup_controller.go | 8 +++++++-
7 files changed, 37 insertions(+), 11 deletions(-)
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index f0164e0937..312cdc57cd 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -38,6 +38,7 @@ spec:
args:
- controller
- --leader-elect
+ - --max-concurrent-reconciles=10
- --config-map-name=$(OPERATOR_DEPLOYMENT_NAME)-config
- --secret-name=$(OPERATOR_DEPLOYMENT_NAME)-config
- --webhook-port=9443
diff --git a/internal/cmd/manager/controller/cmd.go b/internal/cmd/manager/controller/cmd.go
index 9eb5629868..3a94375a80 100644
--- a/internal/cmd/manager/controller/cmd.go
+++ b/internal/cmd/manager/controller/cmd.go
@@ -34,6 +34,7 @@ func NewCmd() *cobra.Command {
var pprofHTTPServer bool
var leaderLeaseDuration int
var leaderRenewDeadline int
+ var maxConcurrentReconciles int
cmd := cobra.Command{
Use: "controller [flags]",
@@ -50,6 +51,7 @@ func NewCmd() *cobra.Command {
},
pprofHTTPServer,
port,
+ maxConcurrentReconciles,
configuration.Current,
)
},
@@ -77,6 +79,12 @@ func NewCmd() *cobra.Command {
false,
"If true it will start a pprof debug http server on localhost:6060. Defaults to false.",
)
+ cmd.Flags().IntVar(
+ &maxConcurrentReconciles,
+ "max-concurrent-reconciles",
+ 10,
+ "The maximum number of concurrent reconciles. Defaults to 10.",
+ )
return &cmd
}
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index fe8028545d..84b452c908 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -95,6 +95,7 @@ func RunController(
leaderConfig leaderElectionConfiguration,
pprofDebug bool,
port int,
+ maxConcurrentReconciles int,
conf *configuration.Data,
) error {
ctx := context.Background()
@@ -222,7 +223,7 @@ func RunController(
mgr,
discoveryClient,
pluginRepository,
- ).SetupWithManager(ctx, mgr); err != nil {
+ ).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Cluster")
return err
}
@@ -236,10 +237,8 @@ func RunController(
return err
}
- if err = controller.NewPluginReconciler(
- mgr,
- pluginRepository,
- ).SetupWithManager(mgr, configuration.Current.OperatorNamespace); err != nil {
+ if err = controller.NewPluginReconciler(mgr, pluginRepository).
+ SetupWithManager(mgr, configuration.Current.OperatorNamespace, maxConcurrentReconciles); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Plugin")
return err
}
@@ -248,7 +247,7 @@ func RunController(
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("cloudnative-pg-scheduledbackup"),
- }).SetupWithManager(ctx, mgr); err != nil {
+ }).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ScheduledBackup")
return err
}
@@ -258,7 +257,7 @@ func RunController(
DiscoveryClient: discoveryClient,
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("cloudnative-pg-pooler"),
- }).SetupWithManager(mgr); err != nil {
+ }).SetupWithManager(mgr, maxConcurrentReconciles); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Pooler")
return err
}
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 606ecab88c..2c7dc542af 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -37,6 +37,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
@@ -1021,13 +1022,16 @@ func (r *ClusterReconciler) handleRollingUpdate(
}
// SetupWithManager creates a ClusterReconciler
-func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
+func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, maxConcurrentReconciles int) error {
err := r.createFieldIndexes(ctx, mgr)
if err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
+ WithOptions(controller.Options{
+ MaxConcurrentReconciles: maxConcurrentReconciles,
+ }).
For(&apiv1.Cluster{}).
Named("cluster").
Owns(&corev1.Pod{}).
diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go
index 7d4e606e2e..0e393f1525 100644
--- a/internal/controller/plugin_controller.go
+++ b/internal/controller/plugin_controller.go
@@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
@@ -207,7 +208,11 @@ func (r *PluginReconciler) getSecret(
}
// SetupWithManager adds this PluginReconciler to the passed controller manager
-func (r *PluginReconciler) SetupWithManager(mgr ctrl.Manager, operatorNamespace string) error {
+func (r *PluginReconciler) SetupWithManager(
+ mgr ctrl.Manager,
+ operatorNamespace string,
+ maxConcurrentReconciles int,
+) error {
pluginServicesPredicate := predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
return isPluginService(e.Object, operatorNamespace)
@@ -224,6 +229,7 @@ func (r *PluginReconciler) SetupWithManager(mgr ctrl.Manager, operatorNamespace
}
return ctrl.NewControllerManagedBy(mgr).
+ WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
For(&corev1.Service{}).
Named("plugin").
WithEventFilter(pluginServicesPredicate).
diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go
index af54dbb344..e1d343f35b 100644
--- a/internal/controller/pooler_controller.go
+++ b/internal/controller/pooler_controller.go
@@ -35,6 +35,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -126,8 +127,9 @@ func (r *PoolerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
}
// SetupWithManager setup this controller inside the controller manager
-func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int) error {
return ctrl.NewControllerManagedBy(mgr).
+ WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
For(&apiv1.Pooler{}).
Named("pooler").
Owns(&v1.Deployment{}).
diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go
index d3a96d1f55..8d4d3fa248 100644
--- a/internal/controller/scheduledbackup_controller.go
+++ b/internal/controller/scheduledbackup_controller.go
@@ -33,6 +33,7 @@ import (
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -326,7 +327,11 @@ func (r *ScheduledBackupReconciler) GetChildBackups(
}
// SetupWithManager install this controller in the controller manager
-func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
+func (r *ScheduledBackupReconciler) SetupWithManager(
+ ctx context.Context,
+ mgr ctrl.Manager,
+ maxConcurrentReconciles int,
+) error {
// Create a new indexed field on backups. This field will be used to easily
// find all the backups created by this controller
if err := mgr.GetFieldIndexer().IndexField(
@@ -353,6 +358,7 @@ func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ct
}
return ctrl.NewControllerManagedBy(mgr).
+ WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
For(&apiv1.ScheduledBackup{}).
Named("scheduled-backup").
Complete(r)
From 9c98fbc2c732f3b83ae1940fa6205e8f3fa3d95f Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Fri, 6 Dec 2024 07:55:40 +0100
Subject: [PATCH 207/836] docs: add a sample file for Scaleway object storage
(#6143)
Signed-off-by: Pierrick
---
docs/src/samples.md | 8 ++++++-
 .../cluster-example-with-backup-scaleway.yaml | 23 +++++++++++++++++++
2 files changed, 30 insertions(+), 1 deletion(-)
create mode 100644 docs/src/samples/cluster-example-with-backup-scaleway.yaml
diff --git a/docs/src/samples.md b/docs/src/samples.md
index a6ef595d7c..823b4a0f6f 100644
--- a/docs/src/samples.md
+++ b/docs/src/samples.md
@@ -62,12 +62,18 @@ your PostgreSQL cluster.
: [`backup-example.yaml`](samples/backup-example.yaml):
An example of a backup that runs against the previous sample.
-**Simple cluster with backup configured**
+**Simple cluster with backup configured for MinIO**
: *Prerequisites*: The configuration assumes minio is running and working.
Update `backup.barmanObjectStore` with your minio parameters or your cloud solution.
: [`cluster-example-with-backup.yaml`](samples/cluster-example-with-backup.yaml)
A basic cluster with backups configured.
+**Simple cluster with backup configured for Scaleway Object Storage**
+: *Prerequisites*: The configuration assumes a Scaleway Object Storage bucket exists.
+ Update `backup.barmanObjectStore` with your Scaleway parameters.
+: [`cluster-example-with-backup-scaleway.yaml`](samples/cluster-example-with-backup-scaleway.yaml)
+ A basic cluster with backups configured to work with Scaleway Object Storage.
+
## Replica clusters
**Replica cluster by way of backup from an object store**
diff --git a/docs/src/samples/cluster-example-with-backup-scaleway.yaml b/docs/src/samples/cluster-example-with-backup-scaleway.yaml
new file mode 100644
index 0000000000..b9f7905edb
--- /dev/null
+++ b/docs/src/samples/cluster-example-with-backup-scaleway.yaml
@@ -0,0 +1,23 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: pg-backup-scaleway
+spec:
+ instances: 3
+ storage:
+ storageClass: standard
+ size: 1Gi
+ backup:
+ barmanObjectStore:
+ destinationPath: "s3:///backups/" # replace with your bucket's name.
+ endpointURL: "https://s3..scw.cloud" # replace with your bucket's region.
+ s3Credentials:
+ accessKeyId:
+ name: scaleway
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: scaleway
+ key: ACCESS_SECRET_KEY
+ region:
+ name: scaleway
+ key: ACCESS_REGION
From 5ea937b7c4f39fafd634e4698abc666c4b367c21 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Fri, 6 Dec 2024 07:59:05 +0100
Subject: [PATCH 208/836] fix(plugin): avoid displaying physical backups block
when empty (#5998)
Signed-off-by: Gabriele Bartolini
---
internal/cmd/plugin/status/status.go | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go
index c55035a8db..c27018f386 100644
--- a/internal/cmd/plugin/status/status.go
+++ b/internal/cmd/plugin/status/status.go
@@ -998,10 +998,12 @@ func (fullStatus *PostgresqlStatus) printBasebackupStatus(verbosity int) {
return
}
- if verbosity > 0 && len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 {
- fmt.Println(aurora.Green(header))
- fmt.Println(aurora.Yellow("No running physical backups found").String())
- fmt.Println()
+ if len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 {
+ if verbosity > 0 {
+ fmt.Println(aurora.Green(header))
+ fmt.Println(aurora.Yellow("No running physical backups found").String())
+ fmt.Println()
+ }
return
}
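
To make the new control flow explicit, here is a self-contained toy
version (function and inputs are hypothetical simplifications of the
patch above): at verbosity 0, an empty list now prints nothing at all,
while verbose runs keep the header and the placeholder message.

    package main

    import "fmt"

    func printBasebackupStatus(backups []string, verbosity int) {
        const header = "Physical backups"
        if len(backups) == 0 {
            // Only verbose output mentions the absence of backups;
            // the default view now skips the whole block.
            if verbosity > 0 {
                fmt.Println(header)
                fmt.Println("No running physical backups found")
            }
            return
        }
        fmt.Println(header)
        for _, b := range backups {
            fmt.Println(" -", b)
        }
    }

    func main() {
        printBasebackupStatus(nil, 0) // prints nothing
        printBasebackupStatus(nil, 1) // prints header and placeholder
    }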
From c23abee6cbe00088a59125031917dfd1730f6315 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Fri, 6 Dec 2024 10:17:28 +0100
Subject: [PATCH 209/836] chore: drop support for `pg_rewind` in PostgreSQL 12
(#6157)
Starting with PostgreSQL 13, `pg_rewind` automatically performs crash recovery
before starting. This eliminates the need to manually restart the postmaster
after a failed invocation of `pg_rewind`.
This patch removes the code that manually handled the crash recovery process,
simplifying the implementation by leveraging the improved functionality in
PostgreSQL 13 and later versions.
Closes: #6156
Signed-off-by: Leonardo Cecchi
---
.../management/controller/instance_startup.go | 18 +-----------------
pkg/management/postgres/instance.go | 16 ----------------
2 files changed, 1 insertion(+), 33 deletions(-)
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go
index ee81ed483c..c5c8ffcf12 100644
--- a/internal/management/controller/instance_startup.go
+++ b/internal/management/controller/instance_startup.go
@@ -270,23 +270,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
// retrying after having started up the instance.
err = r.instance.Rewind(ctx, pgVersion)
if err != nil {
- contextLogger.Info(
- "pg_rewind failed, starting the server to complete the crash recovery",
- "err", err)
-
- // pg_rewind requires a clean shutdown of the old primary to work.
- // The only way to do that is to start the server again
- // and wait for it to be available again.
- err = r.instance.CompleteCrashRecovery(ctx)
- if err != nil {
- return err
- }
-
- // Then let's go back to the point of the new primary
- err = r.instance.Rewind(ctx, pgVersion)
- if err != nil {
- return err
- }
+ return fmt.Errorf("while executing pg_rewind: %w", err)
}
// Now I can demote myself
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go
index 42e538983d..2a207a63e4 100644
--- a/pkg/management/postgres/instance.go
+++ b/pkg/management/postgres/instance.go
@@ -829,22 +829,6 @@ func (instance *Instance) WaitForPrimaryAvailable(ctx context.Context) error {
return waitForConnectionAvailable(ctx, db)
}
-// CompleteCrashRecovery temporary starts up the server and wait for it
-// to be fully available for queries. This will ensure that the crash recovery
-// is fully done.
-// Important: this function must be called only when the instance isn't started
-func (instance *Instance) CompleteCrashRecovery(ctx context.Context) error {
- log.Info("Waiting for server to complete crash recovery")
-
- defer func() {
- instance.ShutdownConnections()
- }()
-
- return instance.WithActiveInstance(func() error {
- return instance.WaitForSuperuserConnectionAvailable(ctx)
- })
-}
-
// WaitForSuperuserConnectionAvailable waits until we can connect to this
// instance using the superuser account
func (instance *Instance) WaitForSuperuserConnectionAvailable(ctx context.Context) error {
From c61ec6f5e5badc67bb5d025a6be4d08a1c855d94 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Fri, 6 Dec 2024 10:53:56 +0100
Subject: [PATCH 210/836] chore(lint): spellcheck word list (#6290)
Signed-off-by: Leonardo Cecchi
---
.wordlist-en-custom.txt | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 9216cd19d7..42d7a6fa5b 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -361,9 +361,7 @@ README
RHSA
RLS
RPO
-rpo
RTO
-rto
RUNTIME
ReadWriteOnce
RedHat
@@ -404,6 +402,7 @@ SQLRefs
SSL
SSZ
STORAGEACCOUNTNAME
+Scaleway
ScheduledBackup
ScheduledBackupList
ScheduledBackupSpec
@@ -1166,6 +1165,8 @@ robfig
roleRef
rollingupdatestatus
rollout
+rpo
+rto
runonserver
runtime
rw
@@ -1174,6 +1175,7 @@ sa
sas
scalability
scalable
+scaleway
sccs
scheduledbackup
scheduledbackuplist
From 767b53f14bb7bc76776dd13d685da57a58fc1a73 Mon Sep 17 00:00:00 2001
From: Ari Becker
Date: Fri, 6 Dec 2024 12:29:50 +0200
Subject: [PATCH 211/836] chore(plugin): improve getting instance status from
pod error message (#6092)
Closes #6057
Signed-off-by: Ari Becker
---
internal/plugin/resources/instance.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go
index 1ba6a58d47..03d604e67e 100644
--- a/internal/plugin/resources/instance.go
+++ b/internal/plugin/resources/instance.go
@@ -112,7 +112,9 @@ func getInstanceStatusFromPod(
DoRaw(ctx)
if err != nil {
result.AddPod(pod)
- result.Error = err
+ result.Error = fmt.Errorf(
+ "failed to get status by proxying to the pod, you might lack permissions to get pods/proxy: %w",
+ err)
return result
}
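
The detail worth noting in this fix is the use of the %w verb rather
than %v: wrapping keeps the original error available to callers. A
minimal, self-contained illustration (the cause string is made up):

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        cause := errors.New(`pods "cluster-example-1" is forbidden`)
        err := fmt.Errorf(
            "failed to get status by proxying to the pod, "+
                "you might lack permissions to get pods/proxy: %w",
            cause)
        fmt.Println(err)
        // The wrapped cause still matches, so callers can branch on it.
        fmt.Println(errors.Is(err, cause)) // true
    }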
From a0448387e4e4aec7a5019b4dc9d15538cef4468d Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Fri, 6 Dec 2024 11:44:28 +0100
Subject: [PATCH 212/836] chore(e2e): Separate forward connection from psql
connection (#5898)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Refactors the code to decouple the forward connection from the psql
connection.
Prepares the forward connection for reuse in other tests, such as
MinIO.
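
After this refactor the tests follow a single pattern; a hedged sketch
based on the calls visible in the diff below (it assumes the Ginkgo
context of these test files, and checks the error before deferring the
closes):

    forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(
        env, namespace, service, dbname, user, password)
    Expect(err).ToNot(HaveOccurred())
    defer func() {
        _ = conn.Close()    // release the SQL connection first...
        forwardConn.Close() // ...then tear down the port-forward
    }()

    var one string
    Expect(conn.QueryRow("SELECT 1").Scan(&one)).To(Succeed())
    Expect(strings.TrimSpace(one)).To(BeEquivalentTo("1"))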
Closes #5880
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Signed-off-by: Niccolò Fei
Co-authored-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
Co-authored-by: Niccolò Fei
---
tests/e2e/asserts_test.go | 229 ++++++-------
tests/e2e/connection_test.go | 16 +-
tests/e2e/managed_roles_test.go | 305 ++++++------------
tests/e2e/pg_basebackup_test.go | 8 +-
tests/e2e/update_user_test.go | 15 +-
tests/utils/forwardconnection/doc.go | 20 ++
.../forwardconnection/forwardconnection.go | 218 +++++++++++++
tests/utils/psql_connection.go | 195 ++++++-----
tests/utils/service.go | 14 +
9 files changed, 572 insertions(+), 448 deletions(-)
create mode 100644 tests/utils/forwardconnection/doc.go
create mode 100644 tests/utils/forwardconnection/forwardconnection.go
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 5bdf763c9a..a7a5eb21ee 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -349,6 +349,8 @@ func AssertUpdateSecret(
env *testsUtils.TestingEnvironment,
) {
var secret corev1.Secret
+
+ // Gather the secret
Eventually(func(g Gomega) {
err := env.Client.Get(env.Ctx,
ctrlclient.ObjectKey{Namespace: namespace, Name: secretName},
@@ -356,13 +358,14 @@ func AssertUpdateSecret(
g.Expect(err).ToNot(HaveOccurred())
}).Should(Succeed())
+ // Change the given field to the new value provided
secret.Data[field] = []byte(value)
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
return env.Client.Update(env.Ctx, &secret)
})
Expect(err).ToNot(HaveOccurred())
- // Wait for the cluster pickup the updated secrets version first
+ // Wait for the cluster to pick up the updated secrets version first
Eventually(func() string {
cluster, err := env.GetCluster(namespace, clusterName)
if err != nil {
@@ -371,13 +374,23 @@ func AssertUpdateSecret(
}
switch {
case strings.HasSuffix(secretName, apiv1.ApplicationUserSecretSuffix):
- GinkgoWriter.Printf("Resource version of Application secret referenced in the cluster is %v\n",
+ GinkgoWriter.Printf("Resource version of %s secret referenced in the cluster is %v\n",
+ secretName,
cluster.Status.SecretsResourceVersion.ApplicationSecretVersion)
return cluster.Status.SecretsResourceVersion.ApplicationSecretVersion
+
case strings.HasSuffix(secretName, apiv1.SuperUserSecretSuffix):
- GinkgoWriter.Printf("Resource version of Superuser secret referenced in the cluster is %v\n",
+ GinkgoWriter.Printf("Resource version of %s secret referenced in the cluster is %v\n",
+ secretName,
cluster.Status.SecretsResourceVersion.SuperuserSecretVersion)
return cluster.Status.SecretsResourceVersion.SuperuserSecretVersion
+
+ case cluster.UsesSecretInManagedRoles(secretName):
+ GinkgoWriter.Printf("Resource version of %s ManagedRole secret referenced in the cluster is %v\n",
+ secretName,
+ cluster.Status.SecretsResourceVersion.ManagedRoleSecretVersions[secretName])
+ return cluster.Status.SecretsResourceVersion.ManagedRoleSecretVersions[secretName]
+
default:
GinkgoWriter.Printf("Unsupported secrets name found %v\n", secretName)
return ""
@@ -385,28 +398,36 @@ func AssertUpdateSecret(
}, timeout).Should(BeEquivalentTo(secret.ResourceVersion))
}
-// AssertConnection is used if a connection from a pod to a postgresql
-// database works
+// AssertConnection checks that a connection to a PostgreSQL database through the given service works
func AssertConnection(
- host string,
- user string,
+ namespace string,
+ service string,
dbname string,
+ user string,
password string,
- queryingPod *corev1.Pod,
- timeout int,
env *testsUtils.TestingEnvironment,
) {
- By(fmt.Sprintf("connecting to the %v service as %v", host, user), func() {
- Eventually(func() string {
- dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", host, user, dbname, password)
- commandTimeout := time.Second * 10
- stdout, _, err := env.ExecCommand(env.Ctx, *queryingPod, specs.PostgresContainerName, &commandTimeout,
- "psql", dsn, "-tAc", "SELECT 1")
- if err != nil {
- return ""
- }
- return stdout
- }, timeout).Should(Equal("1\n"))
+ By(fmt.Sprintf("checking that %v service exists", service), func() {
+ Eventually(func(g Gomega) {
+ _, err := testsUtils.GetService(namespace, service, env)
+ g.Expect(err).ToNot(HaveOccurred())
+ }, RetryTimeout).Should(Succeed())
+ })
+
+ By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() {
+ forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
+ dbname, user, password)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ Expect(err).ToNot(HaveOccurred())
+
+ var rawValue string
+ row := conn.QueryRow("SELECT 1")
+ err = row.Scan(&rawValue)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1"))
})
}
@@ -508,24 +529,26 @@ func AssertDatabaseExists(pod *corev1.Pod, databaseName string, expectedValue bo
// AssertUserExists assert if user exists
func AssertUserExists(pod *corev1.Pod, userName string, expectedValue bool) {
By(fmt.Sprintf("verifying if user %v exists", userName), func() {
- query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_user WHERE lower(usename) = lower('%v'));", userName)
- stdout, stderr, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
- Namespace: pod.Namespace,
- PodName: pod.Name,
- },
- testsUtils.PostgresDBName,
- query)
- if err != nil {
- GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr)
- }
- Expect(err).ToNot(HaveOccurred())
+ query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE lower(rolname) = lower('%v'));", userName)
+ Eventually(func(g Gomega) {
+ stdout, stderr, err := env.ExecQueryInInstancePod(
+ testsUtils.PodLocator{
+ Namespace: pod.Namespace,
+ PodName: pod.Name,
+ },
+ testsUtils.PostgresDBName,
+ query)
+ if err != nil {
+ GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr)
+ }
+ g.Expect(err).ToNot(HaveOccurred())
- if expectedValue {
- Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t"))
- } else {
- Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f"))
- }
+ if expectedValue {
+ g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t"))
+ } else {
+ g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f"))
+ }
+ }, 60).Should(Succeed())
})
}
@@ -1079,57 +1102,53 @@ func AssertDetachReplicaModeCluster(
})
}
-func AssertWritesToReplicaFails(
- connectingPod *corev1.Pod,
- service string,
- appDBName string,
- appDBUser string,
- appDBPass string,
-) {
- By(fmt.Sprintf("Verifying %v service doesn't allow writes", service),
- func() {
- timeout := time.Second * 10
- dsn := testsUtils.CreateDSN(service, appDBUser, appDBName, appDBPass, testsUtils.Require, 5432)
-
- // Expect to be connected to a replica
- stdout, _, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout,
- "psql", dsn, "-tAc", "select pg_is_in_recovery()")
- value := strings.Trim(stdout, "\n")
- Expect(value, err).To(Equal("t"))
-
- // Expect to be in a read-only transaction
- _, _, err = utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, *connectingPod,
- specs.PostgresContainerName, &timeout,
- "psql", dsn, "-tAc", "CREATE TABLE IF NOT EXISTS table1(var1 text);")
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).Should(
- ContainSubstring("cannot execute CREATE TABLE in a read-only transaction"))
- })
+func AssertWritesToReplicaFails(namespace, service, appDBName, appDBUser, appDBPass string) {
+ By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() {
+ forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
+ appDBName, appDBUser, appDBPass)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ Expect(err).ToNot(HaveOccurred())
+
+ var rawValue string
+ // Expect to be connected to a replica
+ row := conn.QueryRow("SELECT pg_is_in_recovery()")
+ err = row.Scan(&rawValue)
+ Expect(err).ToNot(HaveOccurred())
+ isReplica := strings.TrimSpace(rawValue)
+ Expect(isReplica).To(BeEquivalentTo("true"))
+
+ // Expect to be in a read-only transaction
+ _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)")
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction"))
+ })
}
-func AssertWritesToPrimarySucceeds(
- connectingPod *corev1.Pod,
- service string,
- appDBName string,
- appDBUser string,
- appDBPass string,
-) {
- By(fmt.Sprintf("Verifying %v service correctly manages writes", service),
- func() {
- timeout := time.Second * 10
- dsn := testsUtils.CreateDSN(service, appDBUser, appDBName, appDBPass, testsUtils.Require, 5432)
+func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) {
+ By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() {
+ forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
+ appDBName, appDBUser, appDBPass)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ Expect(err).ToNot(HaveOccurred())
- // Expect to be connected to a primary
- stdout, _, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout,
- "psql", dsn, "-tAc", "select pg_is_in_recovery()")
- value := strings.Trim(stdout, "\n")
- Expect(value, err).To(Equal("f"))
+ var rawValue string
+ // Expect to be connected to a primary
+ row := conn.QueryRow("SELECT pg_is_in_recovery()")
+ err = row.Scan(&rawValue)
+ Expect(err).ToNot(HaveOccurred())
+ isReplica := strings.TrimSpace(rawValue)
+ Expect(isReplica).To(BeEquivalentTo("false"))
- // Expect to be able to write
- _, _, err = env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout,
- "psql", dsn, "-tAc", "CREATE TABLE IF NOT EXISTS table1(var1 text);")
- Expect(err).ToNot(HaveOccurred())
- })
+ // Expect to be able to write
+ _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)")
+ Expect(err).ToNot(HaveOccurred())
+ })
}
func AssertFastFailOver(
@@ -1346,7 +1365,6 @@ func AssertApplicationDatabaseConnection(
appDB,
appPassword,
appSecretName string,
- pod *corev1.Pod,
) {
By("checking cluster can connect with application database user and password", func() {
// Get the app user password from the auto generated -app secret if appPassword is not provided
@@ -1363,10 +1381,9 @@ func AssertApplicationDatabaseConnection(
Expect(err).ToNot(HaveOccurred())
appPassword = string(appSecret.Data["password"])
}
- // rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
- rwService := testsUtils.CreateServiceFQDN(namespace, testsUtils.GetReadWriteServiceName(clusterName))
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
- AssertConnection(rwService, appUser, appDB, appPassword, pod, 60, env)
+ AssertConnection(namespace, rwService, appDB, appUser, appPassword, env)
})
}
@@ -1578,33 +1595,26 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
secretName := restoredClusterName + apiv1.ApplicationUserSecretSuffix
By("checking the restored cluster with pre-defined app password connectable", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, restoredClusterName)
- Expect(err).ToNot(HaveOccurred())
AssertApplicationDatabaseConnection(
namespace,
restoredClusterName,
appUser,
testsUtils.AppDBName,
appUserPass,
- secretName,
- primaryPod)
+ secretName)
})
By("update user application password for restored cluster and verify connectivity", func() {
const newPassword = "eeh2Zahohx" //nolint:gosec
AssertUpdateSecret("password", newPassword, secretName, namespace, restoredClusterName, 30, env)
- primaryPod, err := env.GetClusterPrimary(namespace, restoredClusterName)
- Expect(err).ToNot(HaveOccurred())
-
AssertApplicationDatabaseConnection(
namespace,
restoredClusterName,
appUser,
testsUtils.AppDBName,
newPassword,
- secretName,
- primaryPod)
+ secretName)
})
}
@@ -1820,9 +1830,6 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
env)
Expect(err).ToNot(HaveOccurred())
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
By("checking the restored cluster with auto generated app password connectable", func() {
AssertApplicationDatabaseConnection(
namespace,
@@ -1830,8 +1837,7 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
appUser,
testsUtils.AppDBName,
appUserPass,
- secretName,
- primaryPod)
+ secretName)
})
By("update user application password for restored cluster and verify connectivity", func() {
@@ -1843,8 +1849,7 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
appUser,
testsUtils.AppDBName,
newPassword,
- secretName,
- primaryPod)
+ secretName)
})
}
@@ -2001,23 +2006,20 @@ func assertReadWriteConnectionUsingPgBouncerService(
poolerYamlFilePath string,
isPoolerRW bool,
) {
- poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
- poolerService := testsUtils.CreateServiceFQDN(namespace, poolerName)
appUser, generatedAppUserPassword, err := testsUtils.GetCredentials(
clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
Expect(err).ToNot(HaveOccurred())
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
- AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, primaryPod, 180, env)
+ AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env)
// verify that, if pooler type setup read write then it will allow both read and
// write operations or if pooler type setup read only then it will allow only read operations
if isPoolerRW {
- AssertWritesToPrimarySucceeds(primaryPod, poolerService, "app", appUser,
+ AssertWritesToPrimarySucceeds(namespace, poolerService, "app", appUser,
generatedAppUserPassword)
} else {
- AssertWritesToReplicaFails(primaryPod, poolerService, "app", appUser,
+ AssertWritesToReplicaFails(namespace, poolerService, "app", appUser,
generatedAppUserPassword)
}
}
@@ -2344,13 +2346,12 @@ func DeleteTableUsingPgBouncerService(
env *testsUtils.TestingEnvironment,
pod *corev1.Pod,
) {
- poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
- poolerService := testsUtils.CreateServiceFQDN(namespace, poolerName)
appUser, generatedAppUserPassword, err := testsUtils.GetCredentials(
clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
Expect(err).ToNot(HaveOccurred())
- AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, pod, 180, env)
+ AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env)
connectionTimeout := time.Second * 10
dsn := testsUtils.CreateDSN(poolerService, appUser, testsUtils.AppDBName, generatedAppUserPassword,
diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go
index 67227ca118..fd962159f0 100644
--- a/tests/e2e/connection_test.go
+++ b/tests/e2e/connection_test.go
@@ -53,20 +53,18 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity
superuserPassword string,
env *utils.TestingEnvironment,
) {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
// We test -rw, -ro and -r services with the app user and the superuser
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
- rService := fmt.Sprintf("%v-r.%v.svc", clusterName, namespace)
- roService := fmt.Sprintf("%v-ro.%v.svc", clusterName, namespace)
+ rwService := fmt.Sprintf("%v-rw", clusterName)
+ rService := fmt.Sprintf("%v-r", clusterName)
+ roService := fmt.Sprintf("%v-ro", clusterName)
services := []string{rwService, roService, rService}
for _, service := range services {
- AssertConnection(service, "postgres", appDBName, superuserPassword, primaryPod, 10, env)
- AssertConnection(service, appDBUser, appDBName, appPassword, primaryPod, 10, env)
+ AssertConnection(namespace, service, appDBName, utils.PostgresDBName, superuserPassword, env)
+ AssertConnection(namespace, service, appDBName, appDBUser, appPassword, env)
}
- AssertWritesToReplicaFails(primaryPod, roService, appDBName, appDBUser, appPassword)
- AssertWritesToPrimarySucceeds(primaryPod, rwService, appDBName, appDBUser, appPassword)
+ AssertWritesToReplicaFails(namespace, roService, appDBName, appDBUser, appPassword)
+ AssertWritesToPrimarySucceeds(namespace, rwService, appDBName, appDBUser, appPassword)
}
Context("Auto-generated passwords", func() {
diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go
index 534867203d..fc5dd5f314 100644
--- a/tests/e2e/managed_roles_test.go
+++ b/tests/e2e/managed_roles_test.go
@@ -29,7 +29,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
@@ -56,6 +55,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Context("plain vanilla cluster", Ordered, func() {
const (
namespacePrefix = "managed-roles"
+ secretName = "cluster-example-dante"
username = "dante"
appUsername = "app"
password = "dante"
@@ -64,8 +64,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
userWithPerpetualPass = "boccaccio"
userWithHashedPassword = "cavalcanti"
)
- var clusterName, secretName, namespace string
- var secretNameSpacedName *types.NamespacedName
+ var clusterName, namespace string
BeforeAll(func() {
var err error
@@ -76,35 +75,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
clusterName, err = env.GetResourceNameFromYAML(clusterManifest)
Expect(err).ToNot(HaveOccurred())
- secretName = "cluster-example-dante"
- secretNameSpacedName = &types.NamespacedName{
- Namespace: namespace,
- Name: secretName,
- }
-
By("setting up cluster with managed roles", func() {
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
})
})
- assertUserExists := func(namespace, primaryPod, username string, shouldExists bool) {
- Eventually(func(g Gomega) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPod,
- },
- utils.PostgresDBName,
- "\\du")
- g.Expect(err).ToNot(HaveOccurred())
- if shouldExists {
- g.Expect(stdout).To(ContainSubstring(username))
- } else {
- g.Expect(stdout).NotTo(ContainSubstring(username))
- }
- }, 60).Should(Succeed())
- }
-
assertInRoles := func(namespace, primaryPod, roleName string, expectedRoles []string) {
slices.Sort(expectedRoles)
Eventually(func() []string {
@@ -132,6 +107,24 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
}, 30).Should(BeEquivalentTo(expectedRoles))
}
+ assertRoleStatus := func(namespace, clusterName, query, expectedResult string) {
+ primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() string {
+ stdout, _, err := env.ExecQueryInInstancePod(
+ utils.PodLocator{
+ Namespace: namespace,
+ PodName: primaryPod.Name,
+ },
+ utils.PostgresDBName,
+ query)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(stdout)
+ }, 30).Should(Equal(expectedResult))
+ }
+
It("can create roles specified in the managed roles stanza", func() {
rolCanLoginInSpec := true
rolSuperInSpec := false
@@ -143,13 +136,13 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
rolConnLimitInSpec := 4
By("ensuring the roles created in the managed stanza are in the database with correct attributes", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- assertUserExists(namespace, primaryPodInfo.Name, username, true)
- assertUserExists(namespace, primaryPodInfo.Name, userWithPerpetualPass, true)
- assertUserExists(namespace, primaryPodInfo.Name, userWithHashedPassword, true)
- assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, false)
+ AssertUserExists(primaryPod, username, true)
+ AssertUserExists(primaryPod, userWithPerpetualPass, true)
+ AssertUserExists(primaryPod, userWithHashedPassword, true)
+ AssertUserExists(primaryPod, unrealizableUser, false)
query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+
"and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+
@@ -160,8 +153,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
for _, q := range []string{query, query2} {
stdout, _, err := env.ExecQueryInInstancePod(
utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
+ Namespace: primaryPod.Namespace,
+ PodName: primaryPod.Name,
},
utils.PostgresDBName,
q)
@@ -171,34 +164,21 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verifying connectivity of new managed role", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
+ rwService := utils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(rwService, username, "postgres", password, primaryPod, 30, env)
-
- AssertConnection(rwService, userWithHashedPassword, "postgres", userWithHashedPassword, primaryPod, 30, env)
+ AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env)
+ AssertConnection(namespace, rwService, utils.PostgresDBName, userWithHashedPassword, userWithHashedPassword, env)
})
By("ensuring the app role has been granted createdb in the managed stanza", func() {
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- assertUserExists(namespace, primaryPodInfo.Name, appUsername, true)
+ AssertUserExists(primaryPodInfo, appUsername, true)
query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+
"FROM pg_roles WHERE rolname='%s'", appUsername)
-
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
- },
- utils.PostgresDBName,
- query)
- Expect(err).ToNot(HaveOccurred())
- Expect(stdout).To(Equal("t\n"))
+ assertRoleStatus(namespace, clusterName, query, "t")
})
By("verifying connectivity of app user", func() {
@@ -213,13 +193,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
)
Expect(err).NotTo(HaveOccurred())
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
pass := string(appUserSecret.Data["password"])
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
+ rwService := utils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(rwService, appUsername, "postgres", pass, primaryPod, 30, env)
+ AssertConnection(namespace, rwService, utils.PostgresDBName, appUsername, pass, env)
})
By("Verify show unrealizable role configurations in the status", func() {
@@ -239,13 +216,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
It("can update role attributes in the spec and they are applied in the database", func() {
- var primaryPod *corev1.Pod
- var err error
expectedLogin := false
expectedCreateDB := false
expectedCreateRole := true
expectedConnLmt := int64(10)
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
+ rwService := utils.GetReadWriteServiceName(clusterName)
By("updating role attribute in spec", func() {
cluster, err := env.GetCluster(namespace, clusterName)
@@ -261,35 +236,24 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verify the role has been updated in the database", func() {
- primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
- Eventually(func() string {
- query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+
- "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v",
- username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt)
-
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPod.Name,
- },
- utils.PostgresDBName,
- query)
- if err != nil {
- return ""
- }
- return stdout
- }, 30).Should(Equal("1\n"))
+ query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+
+ "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v",
+ username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt)
+ assertRoleStatus(namespace, clusterName, query, "1")
})
By("the connection should fail since we disabled the login", func() {
- dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require",
- rwService, username, "postgres", password)
- timeout := time.Second * 10
- _, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout,
- "psql", dsn, "-tAc", "SELECT 1")
+ forwardConn, conn, err := utils.ForwardPSQLServiceConnection(env, namespace, rwService,
+ utils.PostgresDBName, username, password)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ Expect(err).ToNot(HaveOccurred())
+
+ _, err = conn.Exec("SELECT 1")
Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("not permitted to log in"))
})
By("enable Login again", func() {
@@ -301,16 +265,22 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
})
+ By("verifying Login is now enabled", func() {
+ expectedLogin = true
+ query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+
+ "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v",
+ username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt)
+ assertRoleStatus(namespace, clusterName, query, "1")
+ })
+
By("the connectivity should be success again", func() {
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
+ rwService := utils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(rwService, username, "postgres", password, primaryPod, 30, env)
+ AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env)
})
})
It("Can add role with all attribute omitted and verify it is default", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
const (
defaultRolCanLogin = false
defaultRolSuper = false
@@ -335,26 +305,14 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verify new_role exists with all attribute default", func() {
- Eventually(func() string {
- query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+
- "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+
- "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin,
- defaultRolSuper, defaultRolCreateDB,
- defaultRolCreateRole, defaultRolInherit, defaultRolReplication,
- defaultRolByPassRLS, defaultRolConnLimit)
+ query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+
+ "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+
+ "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin,
+ defaultRolSuper, defaultRolCreateDB,
+ defaultRolCreateRole, defaultRolInherit, defaultRolReplication,
+ defaultRolByPassRLS, defaultRolConnLimit)
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
- },
- utils.PostgresDBName,
- query)
- if err != nil {
- return ""
- }
- return stdout
- }, 30).Should(Equal("1\n"))
+ assertRoleStatus(namespace, clusterName, query, "1")
})
})
@@ -376,52 +334,23 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
})
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
By(fmt.Sprintf("Verify comments update in db for %s", newUserName), func() {
- Eventually(func() string {
- query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+
- " FROM pg_catalog.pg_authid WHERE rolname='%s'",
- newUserName)
-
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
- },
- utils.PostgresDBName,
- query)
- if err != nil {
- return ERROR
- }
- return stdout
- }, 30).Should(Equal(fmt.Sprintf("This is user %s\n", newUserName)))
+ query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+
+ " FROM pg_catalog.pg_authid WHERE rolname='%s'",
+ newUserName)
+ assertRoleStatus(namespace, clusterName, query, fmt.Sprintf("This is user %s", newUserName))
})
By(fmt.Sprintf("Verify comments update in db for %s", username), func() {
- Eventually(func() string {
- query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+
- " FROM pg_catalog.pg_authid WHERE rolname='%s'",
- username)
-
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
- },
- utils.PostgresDBName,
- query)
- if err != nil {
- return ERROR
- }
- return stdout
- }, 30).Should(Equal("\n"))
+ query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+
+ " FROM pg_catalog.pg_authid WHERE rolname='%s'",
+ username)
+ assertRoleStatus(namespace, clusterName, query, "")
})
})
It("Can update role membership and verify changes in db ", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("Remove invalid parent role from unrealizableUser and verify user in database", func() {
@@ -441,7 +370,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
- assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, true)
+ AssertUserExists(primaryPod, unrealizableUser, true)
})
By("Add role in InRole for role new_role and verify in database", func() {
@@ -464,7 +393,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
- assertInRoles(namespace, primaryPodInfo.Name, newUserName, []string{"postgres", username})
+ assertInRoles(namespace, primaryPod.Name, newUserName, []string{"postgres", username})
})
By("Remove parent role from InRole for role new_role and verify in database", func() {
@@ -486,7 +415,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
- assertInRoles(namespace, primaryPodInfo.Name, newUserName, []string{username})
+ assertInRoles(namespace, primaryPod.Name, newUserName, []string{username})
})
By("Mock the error for unrealizable User and verify user in database", func() {
@@ -502,7 +431,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
// user not changed
- assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, true)
+ AssertUserExists(primaryPod, unrealizableUser, true)
Eventually(func() int {
cluster, err := env.GetCluster(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
@@ -523,28 +452,21 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
It("Can update role password in secrets and db and verify the connectivity", func() {
- var primaryPod *corev1.Pod
var err error
newPassword := "ThisIsNew"
- By("update password from secrets", func() {
- var secret corev1.Secret
- err := env.Client.Get(env.Ctx, *secretNameSpacedName, &secret)
- Expect(err).ToNot(HaveOccurred())
+ primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
- updated := secret.DeepCopy()
- updated.Data["password"] = []byte(newPassword)
- err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(&secret))
- Expect(err).ToNot(HaveOccurred())
+ By("update password from secrets", func() {
+ AssertUpdateSecret("password", newPassword, secretName,
+ namespace, clusterName, 30, env)
})
By("Verify connectivity using changed password in secret", func() {
- primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
+ rwService := utils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(rwService, username, "postgres", newPassword, primaryPod, 30, env)
+ AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env)
})
By("Update password in database", func() {
@@ -561,9 +483,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
})
- By("Verify password in secrets could still valid", func() {
- rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
- AssertConnection(rwService, username, "postgres", newPassword, primaryPod, 60, env)
+ By("Verify password in secrets is still valid", func() {
+ rwService := utils.GetReadWriteServiceName(clusterName)
+ AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env)
})
})
@@ -589,47 +511,18 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
})
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
-
By(fmt.Sprintf("Verify valid until is removed in db for %s", newUserName), func() {
- Eventually(func() string {
- query := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_authid"+
- " WHERE rolname='%s'",
- newUserName)
-
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
- },
- utils.PostgresDBName,
- query)
- if err != nil {
- return ERROR
- }
- return stdout
- }).Should(Equal("t\n"))
+ query := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_authid"+
+ " WHERE rolname='%s'",
+ newUserName)
+ assertRoleStatus(namespace, clusterName, query, "t")
})
By(fmt.Sprintf("Verify valid until update in db for %s", username), func() {
- Eventually(func() string {
- query := fmt.Sprintf("SELECT rolvaliduntil='%s' FROM pg_catalog.pg_authid "+
- " WHERE rolname='%s'",
- newValidUntilString, username)
-
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
- Namespace: namespace,
- PodName: primaryPodInfo.Name,
- },
- utils.PostgresDBName,
- query)
- if err != nil {
- return ERROR
- }
- return stdout
- }, 30).Should(Equal("t\n"))
+ query := fmt.Sprintf("SELECT rolvaliduntil='%s' FROM pg_catalog.pg_authid "+
+ " WHERE rolname='%s'",
+ newValidUntilString, username)
+ assertRoleStatus(namespace, clusterName, query, "t")
})
})
@@ -649,9 +542,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verify new_role not existed in db", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- assertUserExists(namespace, primaryPodInfo.Name, newUserName, false)
+ AssertUserExists(primaryPod, newUserName, false)
})
})
})
diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go
index 6ec697fd4a..de4fdcd812 100644
--- a/tests/e2e/pg_basebackup_test.go
+++ b/tests/e2e/pg_basebackup_test.go
@@ -71,12 +71,9 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
secretName := dstClusterName + apiv1.ApplicationUserSecretSuffix
- primaryPod, err := env.GetClusterPrimary(namespace, dstClusterName)
- Expect(err).ToNot(HaveOccurred())
-
By("checking the dst cluster with auto generated app password connectable", func() {
AssertApplicationDatabaseConnection(namespace, dstClusterName,
- appUser, utils.AppDBName, "", secretName, primaryPod)
+ appUser, utils.AppDBName, "", secretName)
})
By("update user application password for dst cluster and verify connectivity", func() {
@@ -88,8 +85,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
appUser,
utils.AppDBName,
newPassword,
- secretName,
- primaryPod)
+ secretName)
})
By("checking data have been copied correctly", func() {
diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go
index d614e8ad75..b3824000c2 100644
--- a/tests/e2e/update_user_test.go
+++ b/tests/e2e/update_user_test.go
@@ -56,8 +56,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- host, err := testsUtils.GetHostName(namespace, clusterName, env)
- Expect(err).ToNot(HaveOccurred())
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
appSecretName := clusterName + apiv1.ApplicationUserSecretSuffix
superUserSecretName := clusterName + apiv1.SuperUserSecretSuffix
@@ -69,7 +68,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
const newPassword = "eeh2Zahohx" //nolint:gosec
AssertUpdateSecret("password", newPassword, appSecretName, namespace, clusterName, 30, env)
- AssertConnection(host, testsUtils.AppUser, testsUtils.AppDBName, newPassword, primaryPod, 60, env)
+ AssertConnection(namespace, rwService, testsUtils.AppDBName, testsUtils.AppUser, newPassword, env)
})
By("fail updating user application password with wrong user in secret", func() {
@@ -80,7 +79,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
AssertUpdateSecret("username", newUser, appSecretName, namespace, clusterName, 30, env)
timeout := time.Second * 10
- dsn := testsUtils.CreateDSN(host, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432)
+ dsn := testsUtils.CreateDSN(rwService, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432)
_, _, err := env.ExecCommand(env.Ctx, *primaryPod,
specs.PostgresContainerName, &timeout,
@@ -113,7 +112,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
const newPassword = "fi6uCae7" //nolint:gosec
AssertUpdateSecret("password", newPassword, superUserSecretName, namespace, clusterName, 30, env)
- AssertConnection(host, testsUtils.PostgresUser, testsUtils.PostgresDBName, newPassword, primaryPod, 60, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName, testsUtils.PostgresUser, newPassword, env)
})
})
})
@@ -141,6 +140,8 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
+
secretName := clusterName + apiv1.SuperUserSecretSuffix
var secret corev1.Secret
namespacedName := types.NamespacedName{
@@ -190,12 +191,10 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
g.Expect(err).ToNot(HaveOccurred())
}, 90).WithPolling(time.Second).Should(Succeed())
- host, err := testsUtils.GetHostName(namespace, clusterName, env)
- Expect(err).ToNot(HaveOccurred())
superUser, superUserPass, err := testsUtils.GetCredentials(clusterName, namespace,
apiv1.SuperUserSecretSuffix, env)
Expect(err).ToNot(HaveOccurred())
- AssertConnection(host, superUser, testsUtils.PostgresDBName, superUserPass, primaryPod, 60, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName, superUser, superUserPass, env)
})
By("disable superuser access", func() {
diff --git a/tests/utils/forwardconnection/doc.go b/tests/utils/forwardconnection/doc.go
new file mode 100644
index 0000000000..0e2f7af2bf
--- /dev/null
+++ b/tests/utils/forwardconnection/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package forwardconnection provides an easy interface to create
+// a port forward from the local test to a service or pod
+// inside the testing k8s cluster
+package forwardconnection
diff --git a/tests/utils/forwardconnection/forwardconnection.go b/tests/utils/forwardconnection/forwardconnection.go
new file mode 100644
index 0000000000..77b774ffc1
--- /dev/null
+++ b/tests/utils/forwardconnection/forwardconnection.go
@@ -0,0 +1,218 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package forwardconnection
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+
+ "github.com/onsi/ginkgo/v2"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/portforward"
+ "k8s.io/client-go/transport/spdy"
+)
+
+// PostgresPortMap is the default port map for the PostgreSQL Pod
+const PostgresPortMap = "0:5432"
+
+// ForwardConnection holds the necessary information to manage a port-forward
+// against a service or pod inside Kubernetes
+type ForwardConnection struct {
+ Forwarder *portforward.PortForwarder
+ stopChannel chan struct{}
+ readyChannel chan struct{}
+}
+
+// NewDialerFromService returns a Dialer and the corresponding port maps for the specified service
+func NewDialerFromService(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ config *rest.Config,
+ namespace,
+ service string,
+) (dialer httpstream.Dialer, portMaps []string, err error) {
+ pod, portMap, err := getPodAndPortsFromService(ctx, kubeInterface, namespace, service)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dial, err := NewDialer(kubeInterface, config, namespace, pod)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return dial, portMap, nil
+}
+
+// NewForwardConnection returns a ForwardConnection that forwards the given port maps through the provided dialer
+func NewForwardConnection(
+ dialer httpstream.Dialer,
+ portMaps []string,
+ outWriter,
+ errWriter io.Writer,
+) (*ForwardConnection, error) {
+ fc := &ForwardConnection{
+ stopChannel: make(chan struct{}),
+ readyChannel: make(chan struct{}, 1),
+ }
+
+ var err error
+ fc.Forwarder, err = portforward.New(
+ dialer,
+ portMaps,
+ fc.stopChannel,
+ fc.readyChannel,
+ outWriter,
+ errWriter,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return fc, nil
+}
+
+// NewDialer returns a Dialer to be used with a PortForwarder
+func NewDialer(
+ kubeInterface kubernetes.Interface,
+ config *rest.Config,
+ namespace string,
+ pod string,
+) (httpstream.Dialer, error) {
+ req := kubeInterface.CoreV1().
+ RESTClient().
+ Post().
+ Resource("pods").
+ Namespace(namespace).
+ Name(pod).
+ SubResource("portforward")
+
+ transport, upgrader, err := spdy.RoundTripperFor(config)
+ if err != nil {
+ return nil, err
+ }
+ dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
+ return dialer, nil
+}
+
+// StartAndWait begins the port-forwarding and waits until it's ready
+func (fc *ForwardConnection) StartAndWait() error {
+ var err error
+ go func() {
+ ginkgo.GinkgoWriter.Println("Starting port-forward")
+ err = fc.Forwarder.ForwardPorts()
+ if err != nil {
+ ginkgo.GinkgoWriter.Printf("port-forward failed with error %s\n", err.Error())
+ return
+ }
+ }()
+ select {
+ case <-fc.readyChannel:
+ ginkgo.GinkgoWriter.Println("port-forward ready")
+ return nil
+ case <-fc.stopChannel:
+ ginkgo.GinkgoWriter.Println("port-forward closed")
+ return err
+ }
+}
+
+// GetLocalPort returns the local port assigned to the port-forward
+func (fc *ForwardConnection) GetLocalPort() (string, error) {
+ ports, err := fc.Forwarder.GetPorts()
+ if err != nil {
+ return "", err
+ }
+ return strconv.Itoa(int(ports[0].Local)), nil
+}
+
+// getPortMap takes the first port from the list of ports exposed by the given service, and
+// returns a port map with 0 as the local port, requesting local auto-assignment
+func getPortMap(serviceObj *corev1.Service) ([]string, error) {
+ if len(serviceObj.Spec.Ports) == 0 {
+ return []string{}, fmt.Errorf("service %s has no ports", serviceObj.Name)
+ }
+ port := serviceObj.Spec.Ports[0].Port
+ return []string{fmt.Sprintf("0:%d", port)}, nil
+}
+
+func getPodAndPortsFromService(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ namespace,
+ service string,
+) (string, []string, error) {
+ serviceObj, err := getServiceObject(ctx, kubeInterface, namespace, service)
+ if err != nil {
+ return "", nil, err
+ }
+
+ podObj, err := getPodFromService(ctx, kubeInterface, serviceObj)
+ if err != nil {
+ return "", nil, err
+ }
+
+ portMaps, err := getPortMap(serviceObj)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return podObj.Name, portMaps, nil
+}
+
+func getServiceObject(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ namespace,
+ service string,
+) (*corev1.Service, error) {
+ return kubeInterface.CoreV1().Services(namespace).Get(ctx, service, metav1.GetOptions{})
+}
+
+func getPodFromService(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ serviceObj *corev1.Service,
+) (*corev1.Pod, error) {
+ namespace := serviceObj.Namespace
+
+ labelSelector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
+ MatchLabels: serviceObj.Spec.Selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ podList, err := kubeInterface.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+ LabelSelector: labelSelector.String(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(podList.Items) == 0 {
+ return nil, fmt.Errorf("no pods found for service %s", serviceObj.Name)
+ }
+
+ return &podList.Items[0], nil
+}
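
For context, here is a minimal usage sketch of the new package (the
kubeInterface, restConfig, namespace, and service name below are
placeholder assumptions, not part of the patch):

    // Build a dialer and a port map for the first port exposed by the service
    dialer, portMaps, err := forwardconnection.NewDialerFromService(
        ctx, kubeInterface, restConfig, "default", "cluster-example-rw")
    if err != nil {
        return err
    }
    // Start the tunnel, discarding the forwarder's output streams
    fc, err := forwardconnection.NewForwardConnection(dialer, portMaps, io.Discard, io.Discard)
    if err != nil {
        return err
    }
    if err := fc.StartAndWait(); err != nil {
        return err
    }
    defer fc.Forwarder.Close()
    // The locally assigned port can now be dialed as localhost:<localPort>
    localPort, err := fc.GetLocalPort()
    if err != nil {
        return err
    }
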
diff --git a/tests/utils/psql_connection.go b/tests/utils/psql_connection.go
index a0d8a7a1fb..d3a24cc40a 100644
--- a/tests/utils/psql_connection.go
+++ b/tests/utils/psql_connection.go
@@ -18,128 +18,87 @@ package utils
import (
"database/sql"
- "fmt"
- "net/http"
- "os"
- "strconv"
+ "io"
"time"
- "github.com/onsi/ginkgo/v2"
- "k8s.io/client-go/rest"
+ "k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/client-go/tools/portforward"
- "k8s.io/client-go/transport/spdy"
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/forwardconnection"
)
-// PSQLForwardConnection manage the creation of a port forward to connect by psql client locally
+// PSQLForwardConnection manages the creation of a port-forward used to open a new database connection
type PSQLForwardConnection struct {
- namespace string
- pod string
- stopChan chan struct{}
- readyChan chan struct{}
- pooler *pool.ConnectionPool
+ pooler pool.Pooler
portForward *portforward.PortForwarder
- err error
}
-// psqlForwardConnectionNew initialize and create the proper forward configuration
-func psqlForwardConnectionNew(env *TestingEnvironment, namespace, pod string) (*PSQLForwardConnection, error) {
- psqlc := &PSQLForwardConnection{}
- if pod == "" {
- return nil, fmt.Errorf("pod not provided")
- }
- psqlc.namespace = namespace
- psqlc.pod = pod
+// Close will stop the port-forwarding and exit
+func (psqlc *PSQLForwardConnection) Close() {
+ psqlc.portForward.Close()
+}
- req := psqlc.createRequest(env)
+// GetPooler returns the connection Pooler
+func (psqlc *PSQLForwardConnection) GetPooler() pool.Pooler {
+ return psqlc.pooler
+}
- transport, upgrader, err := spdy.RoundTripperFor(env.RestClientConfig)
- if err != nil {
- return nil, err
+// createConnectionParameters returns the map of parameters required to open a connection
+func createConnectionParameters(user, password, localPort string) map[string]string {
+ return map[string]string{
+ "host": "localhost",
+ "port": localPort,
+ "user": user,
+ "password": password,
}
- dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
-
- psqlc.readyChan = make(chan struct{}, 1)
- psqlc.stopChan = make(chan struct{})
+}
- psqlc.portForward, err = portforward.New(
+func startForwardConnection(
+ dialer httpstream.Dialer,
+ portMap []string,
+ dbname,
+ userApp,
+ passApp string,
+) (*PSQLForwardConnection, *sql.DB, error) {
+ forwarder, err := forwardconnection.NewForwardConnection(
dialer,
- []string{"0:5432"},
- psqlc.stopChan,
- psqlc.readyChan,
- os.Stdout,
- os.Stderr,
+ portMap,
+ io.Discard,
+ io.Discard,
)
-
- return psqlc, err
-}
-
-func (psqlc *PSQLForwardConnection) createRequest(env *TestingEnvironment) *rest.Request {
- return env.Interface.CoreV1().
- RESTClient().
- Post().
- Resource("pods").
- Namespace(psqlc.namespace).
- Name(psqlc.pod).
- SubResource("portforward")
-}
-
-// startAndWait will begin the forward and wait to be ready
-func (psqlc *PSQLForwardConnection) startAndWait() error {
- go func() {
- ginkgo.GinkgoWriter.Printf("Starting port-forward\n")
- psqlc.err = psqlc.portForward.ForwardPorts()
- if psqlc.err != nil {
- ginkgo.GinkgoWriter.Printf("port-forward failed with error %s\n", psqlc.err.Error())
- return
- }
- }()
- select {
- case <-psqlc.readyChan:
- ginkgo.GinkgoWriter.Printf("port-forward ready\n")
- return nil
- case <-psqlc.stopChan:
- ginkgo.GinkgoWriter.Printf("port-forward closed\n")
- return psqlc.err
+ if err != nil {
+ return nil, nil, err
}
-}
-// GetPooler returns the connection Pooler
-func (psqlc *PSQLForwardConnection) GetPooler() *pool.ConnectionPool {
- return psqlc.pooler
-}
+ if err = forwarder.StartAndWait(); err != nil {
+ return nil, nil, err
+ }
-// getLocalPort gets the local port needed to connect to Postgres
-func (psqlc *PSQLForwardConnection) getLocalPort() (string, error) {
- forwardedPorts, err := psqlc.portForward.GetPorts()
+ localPort, err := forwarder.GetLocalPort()
if err != nil {
- return "", err
+ return nil, nil, err
}
- return strconv.Itoa(int(forwardedPorts[0].Local)), nil
-}
+ connParameters := createConnectionParameters(userApp, passApp, localPort)
-// Close will stop the forward and exit
-func (psqlc *PSQLForwardConnection) Close() {
- psqlc.portForward.Close()
-}
+ pooler := pool.NewPgbouncerConnectionPool(configfile.CreateConnectionString(connParameters))
-// createConnectionParameters return the parameters require to create a connection
-// to the current forwarded port
-func (psqlc *PSQLForwardConnection) createConnectionParameters(user, password string) (map[string]string, error) {
- port, err := psqlc.getLocalPort()
+ conn, err := pooler.Connection(dbname)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- return map[string]string{
- "host": "localhost",
- "port": port,
- "user": user,
- "password": password,
- }, nil
+ conn.SetMaxOpenConns(10)
+ conn.SetMaxIdleConns(10)
+ conn.SetConnMaxLifetime(time.Hour)
+ conn.SetConnMaxIdleTime(time.Hour)
+
+ return &PSQLForwardConnection{
+ portForward: forwarder.Forwarder,
+ pooler: pooler,
+ }, conn, err
}
// ForwardPSQLConnection simplifies the creation of a forwarded connection to a PostgreSQL cluster
@@ -158,8 +117,8 @@ func ForwardPSQLConnection(
return ForwardPSQLConnectionWithCreds(env, namespace, clusterName, dbname, user, pass)
}
-// ForwardPSQLConnectionWithCreds does the same as ForwardPSQLConnection but without trying to
-// get the credentials using the cluster
+// ForwardPSQLConnectionWithCreds creates a forwarded connection to a PostgreSQL cluster
+// using the given credentials
func ForwardPSQLConnectionWithCreds(
env *TestingEnvironment,
namespace,
@@ -173,31 +132,57 @@ func ForwardPSQLConnectionWithCreds(
return nil, nil, err
}
- forward, err := psqlForwardConnectionNew(env, namespace, cluster.Status.CurrentPrimary)
+ dialer, err := forwardconnection.NewDialer(
+ env.Interface,
+ env.RestClientConfig,
+ namespace,
+ cluster.Status.CurrentPrimary,
+ )
if err != nil {
return nil, nil, err
}
- if err = forward.startAndWait(); err != nil {
+ psqlForwardConn, conn, err := startForwardConnection(
+ dialer,
+ []string{forwardconnection.PostgresPortMap},
+ dbname,
+ userApp,
+ passApp,
+ )
+ if err != nil {
return nil, nil, err
}
- connParameters, err := forward.createConnectionParameters(userApp, passApp)
+ return psqlForwardConn, conn, err
+}
+
+// ForwardPSQLServiceConnection creates a forwarded connection to a PostgreSQL service
+// using the given credentials
+func ForwardPSQLServiceConnection(
+ env *TestingEnvironment,
+ namespace,
+ serviceName,
+ dbname,
+ userApp,
+ passApp string,
+) (*PSQLForwardConnection, *sql.DB, error) {
+ dialer, portMap, err := forwardconnection.NewDialerFromService(
+ env.Ctx,
+ env.Interface,
+ env.RestClientConfig,
+ namespace,
+ serviceName,
+ )
if err != nil {
return nil, nil, err
}
- forward.pooler = pool.NewPostgresqlConnectionPool(configfile.CreateConnectionString(connParameters))
- conn, err := forward.pooler.Connection(dbname)
+ psqlForwardConn, conn, err := startForwardConnection(dialer, portMap, dbname, userApp, passApp)
if err != nil {
return nil, nil, err
}
- conn.SetMaxOpenConns(10)
- conn.SetMaxIdleConns(10)
- conn.SetConnMaxLifetime(time.Hour)
- conn.SetConnMaxIdleTime(time.Hour)
- return forward, conn, err
+ return psqlForwardConn, conn, err
}
// RunQueryRowOverForward runs QueryRow with a given query, returning the Row of the SQL command
diff --git a/tests/utils/service.go b/tests/utils/service.go
index e569011b22..cce93ca126 100644
--- a/tests/utils/service.go
+++ b/tests/utils/service.go
@@ -45,6 +45,20 @@ func GetReadWriteServiceName(clusterName string) string {
return fmt.Sprintf("%v%v", clusterName, apiv1.ServiceReadWriteSuffix)
}
+// GetService gets a service given name and namespace
+func GetService(namespace, name string, env *TestingEnvironment) (*corev1.Service, error) {
+ namespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }
+ service := &corev1.Service{}
+ err := GetObject(env, namespacedName, service)
+ if err != nil {
+ return nil, err
+ }
+ return service, nil
+}
+
// GetRwServiceObject return read write service object
func GetRwServiceObject(namespace, clusterName string, env *TestingEnvironment) (*corev1.Service, error) {
svcName := GetReadWriteServiceName(clusterName)
From 741388dfaa3ca6eff66738ee257c7a2283d84369 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 6 Dec 2024 12:19:22 +0100
Subject: [PATCH 213/836] fix(deps): update
github.com/cloudnative-pg/barman-cloud digest to 711113b (main) (#6285)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index eaa13af67f..980261fede 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
github.com/avast/retry-go/v4 v4.6.0
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
- github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14
+ github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
diff --git a/go.sum b/go.sum
index cf4ed929c2..2140149042 100644
--- a/go.sum
+++ b/go.sum
@@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54=
github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
-github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5pXyzVAqfjcDgCa1l8b4sumf7XYnGqiP+6XMgbB2E=
-github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
+github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258 h1:B/Wncxl/OXrXJUHHtBCyxE//6FdIxznERfzPMsNHWfw=
+github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs=
From d5cc9bb03afa2025190e2f2a92ce707c479481b5 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Fri, 6 Dec 2024 13:43:49 +0100
Subject: [PATCH 214/836] feat: ensure unique manager for declarative
PostgreSQL resources (#6258)
This patch extends the logic in the Database support code that ensures
only one manager exists for a given Postgres resource, so that it also
covers Publications and Subscriptions.
Close #5922
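
To make the new behavior concrete, here is a sketch (illustrative, not
part of the patch) of the conflict the generic check introduced below
rejects: two Database objects in the same namespace pointing at the same
Cluster and the same PostgreSQL database name.

    dbA := &apiv1.Database{
        ObjectMeta: metav1.ObjectMeta{Name: "db-a", Namespace: "default"},
        Spec: apiv1.DatabaseSpec{
            ClusterRef: corev1.LocalObjectReference{Name: "cluster-example"},
            Name:       "app",
        },
        // dbA has already been reconciled, so it is the current manager
        Status: apiv1.DatabaseStatus{ObservedGeneration: 1},
    }
    dbB := dbA.DeepCopy()
    dbB.ObjectMeta.Name = "db-b"      // second object targeting the same database
    dbB.Status.ObservedGeneration = 0 // not yet reconciled

    list := &apiv1.DatabaseList{Items: []apiv1.Database{*dbA}}
    err := list.MustHaveManagedResourceExclusivity(dbB)
    // err is non-nil: `"app" is already managed by object "db-a"`
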
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
api/v1/database_funcs.go | 21 ++++++
api/v1/generic_funcs.go | 64 +++++++++++++++++++
api/v1/publication_funcs.go | 21 ++++++
api/v1/subscription_funcs.go | 21 ++++++
internal/management/controller/common.go | 53 +++++++++++++++
.../controller/database_controller.go | 60 +----------------
.../controller/database_controller_test.go | 2 +-
.../controller/publication_controller.go | 5 ++
.../controller/subscription_controller.go | 5 ++
9 files changed, 194 insertions(+), 58 deletions(-)
create mode 100644 api/v1/generic_funcs.go
diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
index 198e760ce5..2e87eba148 100644
--- a/api/v1/database_funcs.go
+++ b/api/v1/database_funcs.go
@@ -49,3 +49,24 @@ func (db *Database) GetStatusMessage() string {
func (db *Database) GetClusterRef() corev1.LocalObjectReference {
return db.Spec.ClusterRef
}
+
+// GetManagedObjectName returns the name of the managed database object
+func (db *Database) GetManagedObjectName() string {
+ return db.Spec.Name
+}
+
+// GetName returns the database object name
+func (db *Database) GetName() string {
+ return db.Name
+}
+
+// HasReconciliations returns true if the database object has been reconciled at least once
+func (db *Database) HasReconciliations() bool {
+ return db.Status.ObservedGeneration > 0
+}
+
+// MustHaveManagedResourceExclusivity detects conflicting databases
+func (dbList *DatabaseList) MustHaveManagedResourceExclusivity(reference *Database) error {
+ pointers := toSliceWithPointers(dbList.Items)
+ return ensureManagedResourceExclusivity(reference, pointers)
+}
diff --git a/api/v1/generic_funcs.go b/api/v1/generic_funcs.go
new file mode 100644
index 0000000000..3fc7e756f8
--- /dev/null
+++ b/api/v1/generic_funcs.go
@@ -0,0 +1,64 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+type managedResourceComparer interface {
+ GetName() string
+ GetManagedObjectName() string
+ GetClusterRef() corev1.LocalObjectReference
+ HasReconciliations() bool
+}
+
+func ensureManagedResourceExclusivity[T managedResourceComparer](t1 T, list []T) error {
+ for _, t2 := range list {
+ if t1.GetName() == t2.GetName() {
+ continue
+ }
+
+ if t1.GetClusterRef().Name != t2.GetClusterRef().Name {
+ continue
+ }
+
+ if !t2.HasReconciliations() {
+ continue
+ }
+
+ if t1.GetManagedObjectName() == t2.GetManagedObjectName() {
+ return fmt.Errorf(
+ "%q is already managed by object %q",
+ t1.GetManagedObjectName(), t2.GetName(),
+ )
+ }
+ }
+
+ return nil
+}
+
+// toSliceWithPointers converts a slice of items to a slice of pointers to the items
+func toSliceWithPointers[T any](items []T) []*T {
+ result := make([]*T, len(items))
+ for i, item := range items {
+ result[i] = &item
+ }
+ return result
+}
diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go
index bfda3183a3..c32cc0c0cb 100644
--- a/api/v1/publication_funcs.go
+++ b/api/v1/publication_funcs.go
@@ -49,3 +49,24 @@ func (pub *Publication) GetStatusMessage() string {
func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
return pub.Spec.ClusterRef
}
+
+// GetManagedObjectName returns the name of the managed publication object
+func (pub *Publication) GetManagedObjectName() string {
+ return pub.Spec.Name
+}
+
+// HasReconciliations returns true if the publication has been reconciled at least once
+func (pub *Publication) HasReconciliations() bool {
+ return pub.Status.ObservedGeneration > 0
+}
+
+// GetName returns the publication name
+func (pub *Publication) GetName() string {
+ return pub.Name
+}
+
+// MustHaveManagedResourceExclusivity detects conflicting publications
+func (pub *PublicationList) MustHaveManagedResourceExclusivity(reference *Publication) error {
+ pointers := toSliceWithPointers(pub.Items)
+ return ensureManagedResourceExclusivity(reference, pointers)
+}
diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go
index 506bf05b81..a337bb04a3 100644
--- a/api/v1/subscription_funcs.go
+++ b/api/v1/subscription_funcs.go
@@ -49,3 +49,24 @@ func (sub *Subscription) GetStatusMessage() string {
func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference {
return sub.Spec.ClusterRef
}
+
+// GetName returns the subscription object name
+func (sub *Subscription) GetName() string {
+ return sub.Name
+}
+
+// GetManagedObjectName returns the name of the managed subscription object
+func (sub *Subscription) GetManagedObjectName() string {
+ return sub.Spec.Name
+}
+
+// HasReconciliations returns true if the subscription has been reconciled at least once
+func (sub *Subscription) HasReconciliations() bool {
+ return sub.Status.ObservedGeneration > 0
+}
+
+// MustHaveManagedResourceExclusivity detects conflicting subscriptions
+func (pub *SubscriptionList) MustHaveManagedResourceExclusivity(reference *Subscription) error {
+ pointers := toSliceWithPointers(pub.Items)
+ return ensureManagedResourceExclusivity(reference, pointers)
+}
diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go
index b5013d6657..d0fe51dc68 100644
--- a/internal/management/controller/common.go
+++ b/internal/management/controller/common.go
@@ -23,10 +23,13 @@ import (
"fmt"
"maps"
"slices"
+ "time"
+ "github.com/cloudnative-pg/machinery/pkg/log"
"github.com/jackc/pgx/v5"
"github.com/lib/pq"
"k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -121,3 +124,53 @@ func toPostgresParameters(parameters map[string]string) string {
// pruning last 2 chars `, `
return b.String()[:len(b.String())-2]
}
+
+type postgresResourceManager interface {
+ client.Object
+ HasReconciliations() bool
+ markableAsFailed
+}
+
+type managedResourceExclusivityEnsurer[T postgresResourceManager] interface {
+ MustHaveManagedResourceExclusivity(newManager T) error
+ client.ObjectList
+}
+
+func detectConflictingManagers[T postgresResourceManager, TL managedResourceExclusivityEnsurer[T]](
+ ctx context.Context,
+ cli client.Client,
+ resource T,
+ list TL,
+) (ctrl.Result, error) {
+ if resource.HasReconciliations() {
+ return ctrl.Result{}, nil
+ }
+ contextLogger := log.FromContext(ctx)
+
+ if err := cli.List(ctx, list,
+ client.InNamespace(resource.GetNamespace()),
+ ); err != nil {
+ kind := list.GetObjectKind().GroupVersionKind().Kind
+
+ contextLogger.Error(err, "while getting list",
+ "kind", kind,
+ "namespace", resource.GetNamespace(),
+ )
+ return ctrl.Result{}, fmt.Errorf("impossible to list %s objects in namespace %s: %w",
+ kind, resource.GetNamespace(), err)
+ }
+
+	// Make sure the target PostgreSQL object is not already managed by another Kubernetes resource
+ if conflictErr := list.MustHaveManagedResourceExclusivity(resource); conflictErr != nil {
+ if markErr := markAsFailed(ctx, cli, resource, conflictErr); markErr != nil {
+ return ctrl.Result{},
+				fmt.Errorf("encountered an error while marking the resource as failed: %w, original error: %w",
+ markErr,
+ conflictErr,
+ )
+ }
+ return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
+ }
+
+ return ctrl.Result{}, nil
+}
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index 22a72861fd..7fdbf5ba22 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -114,19 +114,9 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
return ctrl.Result{}, nil
}
- // Make sure the target PG Database is not being managed by another Database Object
- if err := r.ensureOnlyOneManager(ctx, database); err != nil {
- if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil {
- contextLogger.Error(err, "while marking as failed the database resource",
- "error", err,
- "markError", markErr,
- )
- return ctrl.Result{}, fmt.Errorf(
- "encountered an error while marking as failed the database resource: %w, original error: %w",
- markErr,
- err)
- }
- return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
+ if res, err := detectConflictingManagers(ctx, r.Client, &database, &apiv1.DatabaseList{}); err != nil ||
+ !res.IsZero() {
+ return res, err
}
if err := r.reconcileDatabase(ctx, &database); err != nil {
@@ -149,50 +139,6 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil
}
-// ensureOnlyOneManager verifies that the target PostgreSQL Database specified by the given Database object
-// is not already managed by another Database object within the same namespace and cluster.
-// If another Database object is found to be managing the same PostgreSQL database, this method returns an error.
-func (r *DatabaseReconciler) ensureOnlyOneManager(
- ctx context.Context,
- database apiv1.Database,
-) error {
- contextLogger := log.FromContext(ctx)
-
- if database.Status.ObservedGeneration > 0 {
- return nil
- }
-
- var databaseList apiv1.DatabaseList
- if err := r.Client.List(ctx, &databaseList,
- client.InNamespace(r.instance.GetNamespaceName()),
- ); err != nil {
- contextLogger.Error(err, "while getting database list", "namespace", r.instance.GetNamespaceName())
- return fmt.Errorf("impossible to list database objects in namespace %s: %w",
- r.instance.GetNamespaceName(), err)
- }
-
- for _, item := range databaseList.Items {
- if item.Name == database.Name {
- continue
- }
-
- if item.Spec.ClusterRef.Name != r.instance.GetClusterName() {
- continue
- }
-
- if item.Status.ObservedGeneration == 0 {
- continue
- }
-
- if item.Spec.Name == database.Spec.Name {
- return fmt.Errorf("database %q is already managed by Database object %q",
- database.Spec.Name, item.Name)
- }
- }
-
- return nil
-}
-
func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1.Database) error {
if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete {
return nil
diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go
index 37712a2c21..d41d7eab57 100644
--- a/internal/management/controller/database_controller_test.go
+++ b/internal/management/controller/database_controller_test.go
@@ -422,7 +422,7 @@ var _ = Describe("Managed Database status", func() {
}, dbDuplicate)
Expect(err).ToNot(HaveOccurred())
- expectedError := fmt.Sprintf("database %q is already managed by Database object %q",
+ expectedError := fmt.Sprintf("%q is already managed by object %q",
dbDuplicate.Spec.Name, currentManager.Name)
Expect(dbDuplicate.Status.Applied).To(HaveValue(BeFalse()))
Expect(dbDuplicate.Status.Message).To(ContainSubstring(expectedError))
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
index 086b37e778..d268367f1e 100644
--- a/internal/management/controller/publication_controller.go
+++ b/internal/management/controller/publication_controller.go
@@ -115,6 +115,11 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil
}
+ if res, err := detectConflictingManagers(ctx, r.Client, &publication, &apiv1.PublicationList{}); err != nil ||
+ !res.IsZero() {
+ return res, err
+ }
+
if err := r.finalizerReconciler.reconcile(ctx, &publication); err != nil {
return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err)
}
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go
index 16a1165b7f..5fae540722 100644
--- a/internal/management/controller/subscription_controller.go
+++ b/internal/management/controller/subscription_controller.go
@@ -135,6 +135,11 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil
}
+ if res, err := detectConflictingManagers(ctx, r.Client, &subscription, &apiv1.SubscriptionList{}); err != nil ||
+ !res.IsZero() {
+ return res, err
+ }
+
if err := r.alignSubscription(ctx, &subscription, connString); err != nil {
contextLogger.Error(err, "while reconciling subscription")
if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil {
From e17819dcc0605c68dd2b6d73b7c120e872c17504 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Fri, 6 Dec 2024 14:25:51 +0100
Subject: [PATCH 215/836] test: express the initdb unit tests coherently
(#6291)
Signed-off-by: Jaime Silvela
---
pkg/management/postgres/initdb_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go
index 985c42293b..0df37770c9 100644
--- a/pkg/management/postgres/initdb_test.go
+++ b/pkg/management/postgres/initdb_test.go
@@ -32,7 +32,7 @@ var _ = Describe("EnsureTargetDirectoriesDoNotExist", func() {
PgData: GinkgoT().TempDir(),
PgWal: GinkgoT().TempDir(),
}
- Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed())
+ Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().NotTo(HaveOccurred())
Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed())
})
@@ -84,7 +84,7 @@ var _ = Describe("renameExistingTargetDataDirectories", func() {
PgData: GinkgoT().TempDir(),
PgWal: GinkgoT().TempDir(),
}
- Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed())
+ Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().NotTo(HaveOccurred())
Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed())
})
From a4cf356805f29f26e2aed4d0443bc5fa2cc554ed Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Fri, 6 Dec 2024 15:16:16 +0100
Subject: [PATCH 216/836] feat: add `cnpg.io/userType` label to generated
secrets (#4392)
This patch adds a `cnpg.io/userType` label to the secrets containing
user credentials; its value tells whether the user is a superuser or
an application role.
Support for this label is limited to users created by the operator.
Closes #2631
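
As an illustration of how the label can be consumed (the clientset,
context, and namespace below are assumptions, not part of the patch),
the application-user credential secrets of a namespace can now be
selected with a plain label selector:

    secrets, err := clientset.CoreV1().Secrets("default").List(ctx, metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%s=%s", utils.UserTypeLabelName, utils.UserTypeApp),
    })
    if err != nil {
        return err
    }
    for _, s := range secrets.Items {
        fmt.Println(s.Name) // e.g. cluster-example-app
    }
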
Signed-off-by: Pierrick
Signed-off-by: Gabriele Bartolini
Signed-off-by: Leonardo Cecchi
Signed-off-by: Marco Nenciarini
Co-authored-by: Gabriele Bartolini
Co-authored-by: Leonardo Cecchi
Co-authored-by: Marco Nenciarini
---
docs/src/labels_annotations.md | 6 ++++++
internal/controller/cluster_create.go | 6 ++++--
pkg/specs/secrets.go | 4 +++-
pkg/specs/secrets_test.go | 6 +++++-
pkg/utils/labels_annotations.go | 19 ++++++++++++++++++-
5 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md
index 8da4514584..299758c434 100644
--- a/docs/src/labels_annotations.md
+++ b/docs/src/labels_annotations.md
@@ -77,6 +77,12 @@ These predefined labels are managed by CloudNativePG.
: Available on `ConfigMap` and `Secret` resources. When set to `true`,
a change in the resource is automatically reloaded by the operator.
+`cnpg.io/userType`
+: Specifies the type of PostgreSQL user associated with the
+ `Secret`, either `superuser` (Postgres superuser access) or `app`
+ (application-level user in CloudNativePG terminology), and is limited to the
+ default users created by CloudNativePG (typically `postgres` and `app`).
+
`role` - **deprecated**
: Whether the instance running in a pod is a `primary` or a `replica`.
This label is deprecated, you should use `cnpg.io/instanceRole` instead.
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go
index 2aa3a51d47..280fe3361d 100644
--- a/internal/controller/cluster_create.go
+++ b/internal/controller/cluster_create.go
@@ -176,7 +176,8 @@ func (r *ClusterReconciler) reconcileSuperuserSecret(ctx context.Context, cluste
cluster.GetServiceReadWriteName(),
"*",
"postgres",
- postgresPassword)
+ postgresPassword,
+ utils.UserTypeSuperuser)
cluster.SetInheritedDataAndOwnership(&postgresSecret.ObjectMeta)
return createOrPatchClusterCredentialSecret(ctx, r.Client, postgresSecret)
@@ -216,7 +217,8 @@ func (r *ClusterReconciler) reconcileAppUserSecret(ctx context.Context, cluster
cluster.GetServiceReadWriteName(),
cluster.GetApplicationDatabaseName(),
cluster.GetApplicationDatabaseOwner(),
- appPassword)
+ appPassword,
+ utils.UserTypeApp)
cluster.SetInheritedDataAndOwnership(&appSecret.ObjectMeta)
return createOrPatchClusterCredentialSecret(ctx, r.Client, appSecret)
diff --git a/pkg/specs/secrets.go b/pkg/specs/secrets.go
index 2e66a497e7..d7503dd026 100644
--- a/pkg/specs/secrets.go
+++ b/pkg/specs/secrets.go
@@ -35,6 +35,7 @@ func CreateSecret(
dbname string,
username string,
password string,
+ usertype utils.UserType,
) *corev1.Secret {
uriBuilder := newConnectionStringBuilder(hostname, dbname, username, password, namespace)
@@ -43,7 +44,8 @@ func CreateSecret(
Name: name,
Namespace: namespace,
Labels: map[string]string{
- utils.WatchedLabelName: "true",
+ utils.UserTypeLabelName: string(usertype),
+ utils.WatchedLabelName: "true",
},
},
Type: corev1.SecretTypeBasicAuth,
diff --git a/pkg/specs/secrets_test.go b/pkg/specs/secrets_test.go
index 0807c90817..6648b76294 100644
--- a/pkg/specs/secrets_test.go
+++ b/pkg/specs/secrets_test.go
@@ -17,6 +17,8 @@ limitations under the License.
package specs
import (
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -24,7 +26,7 @@ import (
var _ = Describe("Secret creation", func() {
It("create a secret with the right user and password", func() {
secret := CreateSecret("name", "namespace",
- "thishost", "thisdb", "thisuser", "thispassword")
+ "thishost", "thisdb", "thisuser", "thispassword", utils.UserTypeApp)
Expect(secret.Name).To(Equal("name"))
Expect(secret.Namespace).To(Equal("namespace"))
Expect(secret.StringData["username"]).To(Equal("thisuser"))
@@ -39,5 +41,7 @@ var _ = Describe("Secret creation", func() {
Expect(secret.StringData["jdbc-uri"]).To(
Equal("jdbc:postgresql://thishost.namespace:5432/thisdb?password=thispassword&user=thisuser"),
)
+ Expect(secret.Labels).To(
+ HaveKeyWithValue(utils.UserTypeLabelName, string(utils.UserTypeApp)))
})
})
diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go
index cc6a9e19ba..187325013f 100644
--- a/pkg/utils/labels_annotations.go
+++ b/pkg/utils/labels_annotations.go
@@ -71,10 +71,14 @@ const (
// scheduled backup if a backup is created by a scheduled backup
ParentScheduledBackupLabelName = MetadataNamespace + "/scheduled-backup"
- // WatchedLabelName the name of the label which tell if a resource change will be automatically reloaded by instance
+ // WatchedLabelName the name of the label which tells if a resource change will be automatically reloaded by instance
// or not, use for Secrets or ConfigMaps
WatchedLabelName = MetadataNamespace + "/reload"
+ // UserTypeLabelName the name of the label which tells if a Secret refers
+ // to a superuser database role or an application one
+ UserTypeLabelName = MetadataNamespace + "/userType"
+
// BackupTimelineLabelName is the name or the label where the timeline of a backup is kept
BackupTimelineLabelName = MetadataNamespace + "/backupTimeline"
@@ -273,6 +277,19 @@ const (
HibernationAnnotationValueOn HibernationAnnotationValue = "on"
)
+// UserType tells if a secret refers to a superuser database role
+// or an application one
+type UserType string
+
+const (
+ // UserTypeSuperuser is the type of a superuser database
+ // role
+ UserTypeSuperuser UserType = "superuser"
+
+ // UserTypeApp is the type of an application role
+ UserTypeApp UserType = "app"
+)
+
// LabelClusterName labels the object with the cluster name
func LabelClusterName(object *metav1.ObjectMeta, name string) {
if object.Labels == nil {
From d699efa07575ca6068800137488b1f29ab1d4e0b Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Fri, 6 Dec 2024 15:49:07 +0100
Subject: [PATCH 217/836] test(e2e): AssertConnection must retry before failing
(#6293)
Fix a regression in the E2E testing suite introduced in #5898
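
For reference, the fix follows the standard Gomega retry idiom:
assertions made inside an `Eventually` callback must go through the
injected `Gomega` parameter, so a failed attempt triggers another retry
instead of aborting the spec immediately. Roughly (`flakyOperation` is
an illustrative stand-in):

    Eventually(func(g Gomega) {
        value, err := flakyOperation() // e.g. opening the forwarded connection and querying
        g.Expect(err).ToNot(HaveOccurred())
        g.Expect(value).To(Equal("1"))
    }, RetryTimeout).Should(Succeed())
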
Signed-off-by: Marco Nenciarini
---
tests/e2e/asserts_test.go | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index a7a5eb21ee..8c6d459fec 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -415,19 +415,21 @@ func AssertConnection(
})
By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() {
- forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
- dbname, user, password)
- defer func() {
- _ = conn.Close()
- forwardConn.Close()
- }()
- Expect(err).ToNot(HaveOccurred())
+ Eventually(func(g Gomega) {
+ forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
+ dbname, user, password)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ g.Expect(err).ToNot(HaveOccurred())
- var rawValue string
- row := conn.QueryRow("SELECT 1")
- err = row.Scan(&rawValue)
- Expect(err).ToNot(HaveOccurred())
- Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1"))
+ var rawValue string
+ row := conn.QueryRow("SELECT 1")
+ err = row.Scan(&rawValue)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1"))
+ }, RetryTimeout).Should(Succeed())
})
}
From 1839f1b732ce3333f07bfe9f74f2c47dbf66360c Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Fri, 6 Dec 2024 16:48:58 +0100
Subject: [PATCH 218/836] feat(OLM): enhance UI descriptions for Database,
Publication, and Subscription CRDs (#6249)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This patch improves the OLM user interface by adding detailed
descriptions for the Database, Publication, and Subscription custom
resources.
Closes #6248
Signed-off-by: Jaime Silvela
Signed-off-by: Gabriele Bartolini
Signed-off-by: Niccolò Fei
Co-authored-by: Gabriele Bartolini
Co-authored-by: Niccolò Fei
---
.../cloudnative-pg.clusterserviceversion.yaml | 139 +++++++++++++++---
1 file changed, 119 insertions(+), 20 deletions(-)
diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
index a8d0c40f05..fc286e39e7 100644
--- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
+++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
@@ -784,30 +784,120 @@ spec:
- 'urn:alm:descriptor:com.tectonic.ui:text'
- kind: Database
name: databases.postgresql.cnpg.io
- displayName: Database management
- description: Declarative database management
+ displayName: Postgres Database
+ description: Declarative creation and management of a database on a Cluster
version: v1
resources:
- kind: Cluster
name: ''
version: v1
specDescriptors:
- - path: databaseReclaimPolicy
- displayName: Database reclaim policy
- description: Database reclaim policy
- path: cluster
displayName: Cluster requested to create the database
- description: Cluster requested to create the database
+ description: Cluster in which to create the database
- path: name
displayName: Database name
description: Database name
- path: owner
displayName: Database Owner
- description: Database Owner
+ description: Owner of the database that will be created in Postgres
+ - path: ensure
+ displayName: Ensure
+ description: Ensure the PostgreSQL database is `present` or `absent`
+ - path: databaseReclaimPolicy
+ displayName: Database reclaim policy
+ description: Specifies the action to take for the database inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management.
+ # Configuration section
+ - path: template
+ displayName: Template
+ description: The name of the template from which to create this database.
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: tablespace
+ displayName: Tablespace
+ description: The name of the tablespace that will be associated with the database.
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: isTemplate
+ displayName: Database is a template
+ description: If true, this database is considered a template
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: allowConnections
+ displayName: Allow Connections
+ description: If false, then no one can connect to this database
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: connectionLimit
+ displayName: Connection Limit
+ description: How many concurrent connections can be made to this database
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:number'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # Encoding and Locale
+ - path: encoding
+ displayName: Encoding
+ description: Character set encoding to use in the database
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: locale
+ displayName: Locale
+ description: Sets the default collation order and character classification for the database.
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: localeCollate
+ displayName: LC collate
+ description: The collation to use for the database
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: localeCType
+ displayName: LC ctype
+ description: The ctype to use for the database
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: localeProvider
+ displayName: Locale Provider
+      description: Specifies the provider to use for the default collation in this database (available from PostgreSQL 16).
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: collationVersion
+ displayName: Collation version
+ description: The version identifier of the collation
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: builtinLocale
+ displayName: Builtin locale
+ description: The choice of which builtin locale to use
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ # ICU section
+ - path: icuLocale
+ displayName: ICU locale
+ description: ICU locale to use for the database
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - path: icuRules
+ displayName: ICU rules
+ description: Additional customization of ICU locale
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:text'
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
- kind: Publication
name: publications.postgresql.cnpg.io
- displayName: Publication
- description: Declarative publication
+ displayName: Postgres Publication
+ description: Declarative creation and management of a Logical Replication Publication in a PostgreSQL Cluster
version: v1
resources:
- kind: Cluster
@@ -816,20 +906,23 @@ spec:
specDescriptors:
- path: name
displayName: Publication name
- description: Publication name
+ description: Name of the publication for PostgreSQL logical replication
- path: dbname
displayName: Database name
- description: Database name
+ description: Database on which the publication will be created
- path: cluster
displayName: Cluster requested to create the publication
- description: Cluster requested to create the publication
+ description: Cluster on which the publication will be created
- path: target
displayName: Publication target
- description: Publication target
+ description: Specifies which tables/schemas in the database should be published
+ - path: publicationReclaimPolicy
+ displayName: Publication reclaim policy
+      description: Specifies the action to take for the publication inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the publication or retain it for future management.
- kind: Subscription
name: subscriptions.postgresql.cnpg.io
- displayName: Subscription
- description: Declarative subscription
+ displayName: Postgres Subscription
+ description: Declarative creation and management of a Logical Replication Subscription in a PostgreSQL Cluster to a previously defined Publication
version: v1
resources:
- kind: Cluster
@@ -838,16 +931,22 @@ spec:
specDescriptors:
- path: name
displayName: Subscription name
- description: Subscription name
+ description: Name of the subscription for PostgreSQL logical replication
- path: dbname
displayName: Database name
- description: Database name
+ description: Database on which the Subscription will be created
- path: publicationName
displayName: Publication name
- description: Publication name
+ description: Name of the Publication to subscribe to
- path: cluster
displayName: Cluster requested to create the subscription
- description: Cluster requested to create the subscription
+ description: Cluster on which the subscription will be created (subscriber)
- path: externalClusterName
displayName: Name of the external cluster with publication
- description: Name of the external cluster with publication
+ description: Name of the cluster where the Publication is defined (publisher)
+ - path: publicationDBName
+ displayName: Name of the database containing the publication on the external cluster
+ description: The name of the database containing the publication on the external cluster. Defaults to the one in the external cluster definition.
+ - path: subscriptionReclaimPolicy
+ displayName: Subscription reclaim policy
+ description: Specifies the action to take for the subscription inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the subscription or retain it for future management.
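For illustration, a minimal Publication manifest exercising the new descriptors is sketched below. The field paths match the descriptor paths above; the `allTables` target and the `delete` reclaim value are assumptions based on the descriptions, not copied from this patch.

apiVersion: postgresql.cnpg.io/v1
kind: Publication
metadata:
  name: pub-app
spec:
  name: app_pub                      # publication name inside PostgreSQL
  dbname: app                        # database hosting the publication
  cluster:
    name: cluster-publisher          # publisher Cluster
  target:
    allTables: true                  # assumed: publish every table in the database
  publicationReclaimPolicy: delete   # assumed value: drop the publication when the CR is deleted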
From 9ca9edcd1a7f2927590cdfbdedbf2a94800ed75b Mon Sep 17 00:00:00 2001
From: Jeff Mealo
Date: Fri, 6 Dec 2024 11:07:21 -0500
Subject: [PATCH 219/836] feat(plugin): set `User-Agent` in HTTP requests
(#6153)
Properly set the `User-Agent` header in HTTP requests to the Kubernetes
API server.
Closes #6038
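As a rough sketch of the mechanism (the package and function names here are illustrative, not part of the patch): every request sent through a client built from a rest.Config carries the config's UserAgent header, so setting it once at construction time is enough.

package example

import (
	"fmt"

	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newTaggedClient builds a controller-runtime client whose requests
// identify themselves to the Kubernetes API server, e.g. in audit logs.
func newTaggedClient(cfg *rest.Config, version, commit string) (client.Client, error) {
	// All requests issued through cfg will carry this header.
	cfg.UserAgent = fmt.Sprintf("kubectl-cnpg/v%s (%s)", version, commit)
	return client.New(cfg, client.Options{})
}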
Signed-off-by: Jeff Mealo
Signed-off-by: Marco Nenciarini
Signed-off-by: Leonardo Cecchi
Co-authored-by: Marco Nenciarini
Co-authored-by: Leonardo Cecchi
---
internal/cmd/plugin/plugin.go | 4 ++++
internal/cmd/plugin/plugin_test.go | 7 +++++++
2 files changed, 11 insertions(+)
diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index b22c1a9d6c..432e347283 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -37,6 +37,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
)
var (
@@ -108,11 +109,14 @@ func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error {
func createClient(cfg *rest.Config) error {
var err error
+
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = apiv1.AddToScheme(scheme)
_ = storagesnapshotv1.AddToScheme(scheme)
+ cfg.UserAgent = fmt.Sprintf("kubectl-cnpg/v%s (%s)", versions.Version, versions.Info.Commit)
+
Client, err = client.New(cfg, client.Options{Scheme: scheme})
if err != nil {
return err
diff --git a/internal/cmd/plugin/plugin_test.go b/internal/cmd/plugin/plugin_test.go
index b6cfebe70f..8d0ed55683 100644
--- a/internal/cmd/plugin/plugin_test.go
+++ b/internal/cmd/plugin/plugin_test.go
@@ -23,6 +23,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -30,8 +31,14 @@ import (
var _ = Describe("create client", func() {
It("with given configuration", func() {
+ // createClient is not a pure function and as a side effect
+ // it will:
+ // - set the Client global variable
+ // - set the UserAgent field inside cfg
err := createClient(cfg)
+
Expect(err).NotTo(HaveOccurred())
+ Expect(cfg.UserAgent).To(Equal("kubectl-cnpg/v" + versions.Version + " (" + versions.Info.Commit + ")"))
Expect(Client).NotTo(BeNil())
})
})
From fab33aeddddb5a0832732ea3453fa31c12138ea3 Mon Sep 17 00:00:00 2001
From: Peggie
Date: Fri, 6 Dec 2024 17:13:42 +0100
Subject: [PATCH 220/836] feat: Public Cloud K8S versions update (#6263)
Update the versions used to test the operator on public cloud providers
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: public-cloud-k8s-versions-check
---
.github/aks_versions.json | 4 ++--
.github/openshift_versions.json | 1 +
Makefile | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/.github/aks_versions.json b/.github/aks_versions.json
index 3ffbbe129d..b5a3eed520 100644
--- a/.github/aks_versions.json
+++ b/.github/aks_versions.json
@@ -1,6 +1,6 @@
[
- "1.31.1",
- "1.30.5",
+ "1.31.2",
+ "1.30.6",
"1.29.9",
"1.28.9"
]
diff --git a/.github/openshift_versions.json b/.github/openshift_versions.json
index 08587af16a..49cf2ac65d 100644
--- a/.github/openshift_versions.json
+++ b/.github/openshift_versions.json
@@ -1,4 +1,5 @@
[
+ "4.18",
"4.17",
"4.16",
"4.15",
diff --git a/Makefile b/Makefile
index 2b93fba0ea..6a80924a5a 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,7 @@ WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.38.0
OPM_VERSION ?= v1.48.0
PREFLIGHT_VERSION ?= 1.10.2
-OPENSHIFT_VERSIONS ?= v4.12-v4.17
+OPENSHIFT_VERSIONS ?= v4.12-v4.18
ARCH ?= amd64
export CONTROLLER_IMG
From fd62a1c01e7eb1a9effeb6a0773f36f305b10fa1 Mon Sep 17 00:00:00 2001
From: Abhishek Chanda
Date: Fri, 6 Dec 2024 11:13:47 -0600
Subject: [PATCH 221/836] test: make sure we test port correctness for services
(#4934)
Signed-off-by: Abhishek Chanda
Signed-off-by: Marco Nenciarini
Signed-off-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
Co-authored-by: Armando Ruocco
---
pkg/specs/services_test.go | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/pkg/specs/services_test.go b/pkg/specs/services_test.go
index a7c0922b98..dd146fde25 100644
--- a/pkg/specs/services_test.go
+++ b/pkg/specs/services_test.go
@@ -19,8 +19,10 @@ package specs
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
@@ -33,6 +35,12 @@ var _ = Describe("Services specification", func() {
Name: "clustername",
},
}
+ expectedPort := corev1.ServicePort{
+ Name: PostgresContainerName,
+ Protocol: corev1.ProtocolTCP,
+ TargetPort: intstr.FromInt32(postgres.ServerPort),
+ Port: postgres.ServerPort,
+ }
It("create a configured -any service", func() {
service := CreateClusterAnyService(postgresql)
@@ -40,6 +48,8 @@ var _ = Describe("Services specification", func() {
Expect(service.Spec.PublishNotReadyAddresses).To(BeTrue())
Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername"))
Expect(service.Spec.Selector[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance)))
+ Expect(service.Spec.Ports).To(HaveLen(1))
+ Expect(service.Spec.Ports).To(ContainElement(expectedPort))
})
It("create a configured -r service", func() {
@@ -48,6 +58,8 @@ var _ = Describe("Services specification", func() {
Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse())
Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername"))
Expect(service.Spec.Selector[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance)))
+ Expect(service.Spec.Ports).To(HaveLen(1))
+ Expect(service.Spec.Ports).To(ContainElement(expectedPort))
})
It("create a configured -ro service", func() {
@@ -56,6 +68,8 @@ var _ = Describe("Services specification", func() {
Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse())
Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername"))
Expect(service.Spec.Selector[utils.ClusterInstanceRoleLabelName]).To(Equal(ClusterRoleLabelReplica))
+ Expect(service.Spec.Ports).To(HaveLen(1))
+ Expect(service.Spec.Ports).To(ContainElement(expectedPort))
})
It("create a configured -rw service", func() {
@@ -64,6 +78,8 @@ var _ = Describe("Services specification", func() {
Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse())
Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername"))
Expect(service.Spec.Selector[utils.ClusterInstanceRoleLabelName]).To(Equal(ClusterRoleLabelPrimary))
+ Expect(service.Spec.Ports).To(HaveLen(1))
+ Expect(service.Spec.Ports).To(ContainElement(expectedPort))
})
})
From 893c61a6fe978832fa27dc177e62d6146d5064c1 Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Fri, 6 Dec 2024 18:20:48 +0100
Subject: [PATCH 222/836] docs: clarify behavior of primaryUpdateStrategy for
single-instance clusters (#5001)
Clarified the behavior of primaryUpdateStrategy when applied to a
cluster consisting of a single instance.
Signed-off-by: Pierrick
---
docs/src/installation_upgrade.md | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index ec9b2019a6..c948a61148 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -170,7 +170,10 @@ promote the new primary instance using the `cnpg` plugin for `kubectl`.
!!! Important
In case `primaryUpdateStrategy` is set to the default value of `unsupervised`,
an upgrade of the operator will trigger a switchover on your PostgreSQL cluster,
- causing a (normally negligible) downtime.
+ causing a (normally negligible) downtime. If your PostgreSQL Cluster has only one
+ instance, the instance will be automatically restarted, as the `supervised` value
+ is not supported for `primaryUpdateStrategy`. In either case, your applications
+ will have to reconnect to PostgreSQL.
The default rolling update behavior can be replaced with in-place updates of
the instance manager. This approach does not require a restart of the
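For reference, a hedged single-instance example where the clarified paragraph applies (names and sizes are illustrative):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 1
  # `unsupervised` is the default; with a single instance there is no
  # replica to switch over to, so an operator upgrade restarts this instance.
  primaryUpdateStrategy: unsupervised
  storage:
    size: 1Gi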
From 1b873886aa68c2295186d28beb483e9e1fab3efd Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Fri, 6 Dec 2024 09:44:22 -0800
Subject: [PATCH 223/836] docs: heading level in `cluster_conf.md` (#3806)
Signed-off-by: Josh Heyer
---
docs/src/cluster_conf.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/src/cluster_conf.md b/docs/src/cluster_conf.md
index 43ed25b3fd..3634b11e59 100644
--- a/docs/src/cluster_conf.md
+++ b/docs/src/cluster_conf.md
@@ -47,7 +47,7 @@ CloudNativePG relies on [ephemeral volumes](https://kubernetes.io/docs/concepts/
for part of the internal activities. Ephemeral volumes exist for the sole
duration of a pod's life, without persisting across pod restarts.
-# Volume Claim Template for Temporary Storage
+### Volume Claim Template for Temporary Storage
The operator by default uses an `emptyDir` volume, which can be customized by using the `.spec.ephemeralVolumesSizeLimit` field.
This can be overridden by specifying a volume claim template in the `.spec.ephemeralVolumeSource` field.
From d5ad53e003b97d4d236e3f180aae76bea8030ff8 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Sat, 7 Dec 2024 10:31:25 +0100
Subject: [PATCH 224/836] refactor: centralize webserver client logic (#6163)
This patch centralizes all the webserver client logic in a single package
instead of leaving it scattered around the codebase.
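A hedged sketch of how call sites look after this refactor, using only the interfaces introduced below (the wrapping function is illustrative; the import of internal/management/cache only compiles inside this repository):

package example

import (
	"context"

	"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
)

// reportWALArchive shows the new pattern: one constructor, then typed
// sub-clients per endpoint family instead of scattered package helpers.
func reportWALArchive(ctx context.Context) ([]string, error) {
	localClient := local.NewClient()

	// Cache endpoints: cached cluster definition and environments.
	if _, err := localClient.Cache().GetCluster(); err != nil {
		return nil, err
	}
	env, err := localClient.Cache().GetEnv(cache.WALArchiveKey)
	if err != nil {
		return nil, err
	}

	// Cluster endpoints: an empty message marks the archive as successful.
	if err := localClient.Cluster().SetWALArchiveStatusCondition(ctx, ""); err != nil {
		return nil, err
	}
	return env, nil
}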
Signed-off-by: Armando Ruocco
---
internal/cmd/manager/instance/status/cmd.go | 8 +-
internal/cmd/manager/walarchive/cmd.go | 10 +-
internal/cmd/manager/walrestore/cmd.go | 3 +-
internal/controller/backup_controller.go | 6 +-
internal/controller/cluster_controller.go | 6 +-
internal/controller/cluster_upgrade.go | 4 +-
internal/management/cache/cache.go | 11 --
internal/management/cache/doc.go | 19 +++
internal/management/cache/keys.go | 26 ++++
internal/plugin/resources/instance.go | 4 +-
pkg/management/postgres/archiver/archiver.go | 4 +-
.../postgres/webserver/backup_client.go | 146 ------------------
.../webserver/client/common}/client.go | 2 +-
.../postgres/webserver/client/common}/doc.go | 4 +-
.../postgres/webserver/client/local/backup.go | 98 ++++++++++++
.../postgres/webserver/client/local/cache.go | 30 ++--
.../webserver/client/local/cluster.go | 71 +++++++++
.../postgres/webserver/client/local/doc.go | 18 +++
.../postgres/webserver/client/local/local.go | 62 ++++++++
.../webserver/client/local/request.go | 69 +++++++++
.../postgres/webserver/client/remote/doc.go | 18 +++
.../webserver/client/remote/instance.go} | 30 ++--
.../webserver/client/remote/remote.go | 37 +++++
.../postgres/webserver/local_client.go | 63 --------
.../webserver/metricserver/pg_collector.go | 4 +-
.../postgres/webserver/metricserver/wal.go | 4 +-
.../backup/volumesnapshot/online.go | 5 +-
.../backup/volumesnapshot/reconciler.go | 6 +-
.../replicaclusterswitch/reconciler.go | 6 +-
.../replicaclusterswitch/shutdown_wal.go | 4 +-
30 files changed, 493 insertions(+), 285 deletions(-)
create mode 100644 internal/management/cache/doc.go
create mode 100644 internal/management/cache/keys.go
delete mode 100644 pkg/management/postgres/webserver/backup_client.go
rename pkg/{resources => management/postgres/webserver/client/common}/client.go (98%)
rename pkg/{resources/instance => management/postgres/webserver/client/common}/doc.go (84%)
create mode 100644 pkg/management/postgres/webserver/client/local/backup.go
rename internal/management/cache/client/client.go => pkg/management/postgres/webserver/client/local/cache.go (75%)
create mode 100644 pkg/management/postgres/webserver/client/local/cluster.go
create mode 100644 pkg/management/postgres/webserver/client/local/doc.go
create mode 100644 pkg/management/postgres/webserver/client/local/local.go
create mode 100644 pkg/management/postgres/webserver/client/local/request.go
create mode 100644 pkg/management/postgres/webserver/client/remote/doc.go
rename pkg/{resources/instance/client.go => management/postgres/webserver/client/remote/instance.go} (91%)
create mode 100644 pkg/management/postgres/webserver/client/remote/remote.go
delete mode 100644 pkg/management/postgres/webserver/local_client.go
diff --git a/internal/cmd/manager/instance/status/cmd.go b/internal/cmd/manager/instance/status/cmd.go
index d1361d71f1..31c8ed506d 100644
--- a/internal/cmd/manager/instance/status/cmd.go
+++ b/internal/cmd/manager/instance/status/cmd.go
@@ -29,11 +29,11 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
- cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
)
// NewCmd create the "instance status" subcommand
@@ -56,7 +56,7 @@ func statusSubCommand(ctx context.Context) error {
return err
}
- cluster, err := cacheClient.GetCluster()
+ cluster, err := local.NewClient().Cache().GetCluster()
if err != nil {
contextLogger.Error(err, "while loading the cluster from cache")
return err
@@ -131,6 +131,6 @@ func executeRequest(ctx context.Context, scheme string) (*http.Response, error)
contextLogger.Error(err, "Error while building the request")
return nil, err
}
- httpClient := resources.NewHTTPClient(connectionTimeout, requestTimeout)
+ httpClient := common.NewHTTPClient(connectionTimeout, requestTimeout)
return httpClient.Do(req) // nolint:gosec
}
diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go
index 2442f32715..665c656c1f 100644
--- a/internal/cmd/manager/walarchive/cmd.go
+++ b/internal/cmd/manager/walarchive/cmd.go
@@ -25,9 +25,8 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/spf13/cobra"
- cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
)
// errSwitchoverInProgress is raised when there is a switchover in progress
@@ -54,7 +53,8 @@ func NewCmd() *cobra.Command {
return err
}
- cluster, errCluster := cacheClient.GetCluster()
+ localClient := local.NewClient()
+ cluster, errCluster := localClient.Cache().GetCluster()
if errCluster != nil {
return fmt.Errorf("failed to get cluster: %w", errCluster)
}
@@ -66,13 +66,13 @@ func NewCmd() *cobra.Command {
} else {
contextLog.Error(err, logErrorMessage)
}
- if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil {
+ if reqErr := localClient.Cluster().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil {
contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint")
}
return err
}
- if err := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, ""); err != nil {
+ if err := localClient.Cluster().SetWALArchiveStatusCondition(ctx, ""); err != nil {
contextLog.Error(err, "while invoking the set wal archive condition endpoint")
}
return nil
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index db88cb2725..4accc2a023 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -37,7 +37,7 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
- cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
)
@@ -113,6 +113,7 @@ func run(ctx context.Context, pgData string, podName string, args []string) erro
var cluster *apiv1.Cluster
var err error
+ cacheClient := local.NewClient().Cache()
cluster, err = cacheClient.GetCluster()
if err != nil {
return fmt.Errorf("failed to get cluster: %w", err)
diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go
index 424ee3966c..4506c08737 100644
--- a/internal/controller/backup_controller.go
+++ b/internal/controller/backup_controller.go
@@ -47,9 +47,9 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/backup/volumesnapshot"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -71,7 +71,7 @@ type BackupReconciler struct {
Recorder record.EventRecorder
Plugins repository.Interface
- instanceStatusClient instance.Client
+ instanceStatusClient remote.InstanceClient
}
// NewBackupReconciler properly initializes the BackupReconciler
@@ -85,7 +85,7 @@ func NewBackupReconciler(
DiscoveryClient: discoveryClient,
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("cloudnative-pg-backup"),
- instanceStatusClient: instance.NewStatusClient(),
+ instanceStatusClient: remote.NewClient().Instance(),
Plugins: plugins,
}
}
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 2c7dc542af..fb2705f093 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -50,12 +50,12 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
rolloutManager "github.com/cloudnative-pg/cloudnative-pg/internal/controller/rollout"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation"
instanceReconciler "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -83,7 +83,7 @@ type ClusterReconciler struct {
DiscoveryClient discovery.DiscoveryInterface
Scheme *runtime.Scheme
Recorder record.EventRecorder
- InstanceClient instance.Client
+ InstanceClient remote.InstanceClient
Plugins repository.Interface
rolloutManager *rolloutManager.Manager
@@ -96,7 +96,7 @@ func NewClusterReconciler(
plugins repository.Interface,
) *ClusterReconciler {
return &ClusterReconciler{
- InstanceClient: instance.NewStatusClient(),
+ InstanceClient: remote.NewClient().Instance(),
DiscoveryClient: discoveryClient,
Client: operatorclient.NewExtendedClient(mgr.GetClient()),
Scheme: mgr.GetScheme(),
diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go
index cd3e6ac43a..b29dee1929 100644
--- a/internal/controller/cluster_upgrade.go
+++ b/internal/controller/cluster_upgrade.go
@@ -30,9 +30,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -622,7 +622,7 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e
}
envConfig := specs.CreatePodEnvConfig(*cluster, pod.Name)
gracePeriod := int64(cluster.GetMaxStopDelay())
- tlsEnabled := instance.GetStatusSchemeFromPod(pod).IsHTTPS()
+ tlsEnabled := remote.GetStatusSchemeFromPod(pod).IsHTTPS()
targetPodSpec := specs.CreateClusterPodSpec(pod.Name, *cluster, envConfig, gracePeriod, tlsEnabled)
// the bootstrap init-container could change image after an operator upgrade.
diff --git a/internal/management/cache/cache.go b/internal/management/cache/cache.go
index ad1b559e8c..4bdc0519f4 100644
--- a/internal/management/cache/cache.go
+++ b/internal/management/cache/cache.go
@@ -14,23 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package cache contains the constants and functions for reading/writing to the process local cache
-// some specific supported objects
package cache
import (
"sync"
)
-const (
- // ClusterKey is the key to be used to access the cached cluster
- ClusterKey = "cluster"
- // WALArchiveKey is the key to be used to access the cached envs for wal-archive
- WALArchiveKey = "wal-archive"
- // WALRestoreKey is the key to be used to access the cached envs for wal-restore
- WALRestoreKey = "wal-restore"
-)
-
var cache sync.Map
// Store write an object into the local cache
diff --git a/internal/management/cache/doc.go b/internal/management/cache/doc.go
new file mode 100644
index 0000000000..41acc1351a
--- /dev/null
+++ b/internal/management/cache/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cache contains the constants and functions for reading/writing to the process local cache
+// some specific supported objects
+package cache
diff --git a/internal/management/cache/keys.go b/internal/management/cache/keys.go
new file mode 100644
index 0000000000..2792f882c3
--- /dev/null
+++ b/internal/management/cache/keys.go
@@ -0,0 +1,26 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+const (
+ // ClusterKey is the key to be used to access the cached cluster
+ ClusterKey = "cluster"
+ // WALArchiveKey is the key to be used to access the cached envs for wal-archive
+ WALArchiveKey = "wal-archive"
+ // WALRestoreKey is the key to be used to access the cached envs for wal-restore
+ WALRestoreKey = "wal-restore"
+)
diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go
index 03d604e67e..612fb37034 100644
--- a/internal/plugin/resources/instance.go
+++ b/internal/plugin/resources/instance.go
@@ -33,9 +33,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -103,7 +103,7 @@ func getInstanceStatusFromPod(
CoreV1().
Pods(pod.Namespace).
ProxyGet(
- instance.GetStatusSchemeFromPod(&pod).ToString(),
+ remote.GetStatusSchemeFromPod(&pod).ToString(),
pod.Name,
strconv.Itoa(int(url.StatusPort)),
url.PathPgStatus,
diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go
index 251ca52f5a..ccf24efd36 100644
--- a/pkg/management/postgres/archiver/archiver.go
+++ b/pkg/management/postgres/archiver/archiver.go
@@ -36,8 +36,8 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
- cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -165,7 +165,7 @@ func internalRun(
}
// Get environment from cache
- env, err := cacheClient.GetEnv(cache.WALArchiveKey)
+ env, err := local.NewClient().Cache().GetEnv(cache.WALArchiveKey)
if err != nil {
return fmt.Errorf("failed to get envs: %w", err)
}
diff --git a/pkg/management/postgres/webserver/backup_client.go b/pkg/management/postgres/webserver/backup_client.go
deleted file mode 100644
index 549415d82c..0000000000
--- a/pkg/management/postgres/webserver/backup_client.go
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package webserver
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "time"
-
- "github.com/cloudnative-pg/machinery/pkg/log"
- corev1 "k8s.io/api/core/v1"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
-)
-
-// backupClient a client to interact with the instance backup endpoints
-type backupClient struct {
- cli *http.Client
-}
-
-// BackupClient is a struct capable of interacting with the instance backup endpoints
-type BackupClient interface {
- StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error)
- Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error
- Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error
-}
-
-// NewBackupClient creates a client capable of interacting with the instance backup endpoints
-func NewBackupClient() BackupClient {
- const connectionTimeout = 2 * time.Second
- const requestTimeout = 30 * time.Second
-
- return &backupClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)}
-}
-
-// StatusWithErrors retrieves the current status of the backup.
-// Returns the response body in case there is an error in the request
-func (c *backupClient) StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error) {
- scheme := instance.GetStatusSchemeFromPod(pod)
- httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
- req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil)
- if err != nil {
- return nil, err
- }
-
- return executeRequestWithError[BackupResultData](ctx, c.cli, req, true)
-}
-
-// Start runs the pg_start_backup
-func (c *backupClient) Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error {
- scheme := instance.GetStatusSchemeFromPod(pod)
- httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
-
- // Marshalling the payload to JSON
- jsonBody, err := json.Marshal(sbq)
- if err != nil {
- return fmt.Errorf("failed to marshal start payload: %w", err)
- }
-
- req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody))
- if err != nil {
- return err
- }
- req.Header.Set("Content-Type", "application/json")
-
- _, err = executeRequestWithError[struct{}](ctx, c.cli, req, false)
- return err
-}
-
-// Stop runs the command pg_stop_backup
-func (c *backupClient) Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error {
- scheme := instance.GetStatusSchemeFromPod(pod)
- httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
- // Marshalling the payload to JSON
- jsonBody, err := json.Marshal(sbq)
- if err != nil {
- return fmt.Errorf("failed to marshal stop payload: %w", err)
- }
-
- req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody))
- if err != nil {
- return err
- }
- _, err = executeRequestWithError[BackupResultData](ctx, c.cli, req, false)
- return err
-}
-
-func executeRequestWithError[T any](
- ctx context.Context,
- cli *http.Client,
- req *http.Request,
- ignoreBodyErrors bool,
-) (*Response[T], error) {
- contextLogger := log.FromContext(ctx)
-
- resp, err := cli.Do(req)
- if err != nil {
- return nil, fmt.Errorf("while executing http request: %w", err)
- }
-
- defer func() {
- if err := resp.Body.Close(); err != nil {
- contextLogger.Error(err, "while closing response body")
- }
- }()
-
- body, err := io.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("while reading the response body: %w", err)
- }
-
- if resp.StatusCode == http.StatusInternalServerError {
- return nil, fmt.Errorf("encountered an internal server error status code 500 with body: %s", string(body))
- }
-
- var result Response[T]
- if err := json.Unmarshal(body, &result); err != nil {
- return nil, fmt.Errorf("while unmarshalling the body, body: %s err: %w", string(body), err)
- }
- if result.Error != nil && !ignoreBodyErrors {
- return nil, fmt.Errorf("body contained an error code: %s and message: %s",
- result.Error.Code, result.Error.Message)
- }
-
- return &result, nil
-}
diff --git a/pkg/resources/client.go b/pkg/management/postgres/webserver/client/common/client.go
similarity index 98%
rename from pkg/resources/client.go
rename to pkg/management/postgres/webserver/client/common/client.go
index f66cb51446..06bee98158 100644
--- a/pkg/resources/client.go
+++ b/pkg/management/postgres/webserver/client/common/client.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package resources
+package common
import (
"context"
diff --git a/pkg/resources/instance/doc.go b/pkg/management/postgres/webserver/client/common/doc.go
similarity index 84%
rename from pkg/resources/instance/doc.go
rename to pkg/management/postgres/webserver/client/common/doc.go
index 975dc071f9..35dc461c8e 100644
--- a/pkg/resources/instance/doc.go
+++ b/pkg/management/postgres/webserver/client/common/doc.go
@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package instance contains the client capable of querying the HTTP instances endpoints
-package instance
+// Package common provides common utilities for the webserver client.
+package common
diff --git a/pkg/management/postgres/webserver/client/local/backup.go b/pkg/management/postgres/webserver/client/local/backup.go
new file mode 100644
index 0000000000..8a7d4eb57f
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/backup.go
@@ -0,0 +1,98 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package local
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+)
+
+// BackupClient is the interface to interact with the backup endpoints
+type BackupClient interface {
+ StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*webserver.Response[webserver.BackupResultData], error)
+ Start(ctx context.Context, pod *corev1.Pod, sbq webserver.StartBackupRequest) error
+ Stop(ctx context.Context, pod *corev1.Pod, sbq webserver.StopBackupRequest) error
+}
+
+// backupClientImpl a client to interact with the instance backup endpoints
+type backupClientImpl struct {
+ cli *http.Client
+}
+
+// StatusWithErrors retrieves the current status of the backup.
+// Returns the response body in case there is an error in the request
+func (c *backupClientImpl) StatusWithErrors(
+ ctx context.Context,
+ pod *corev1.Pod,
+) (*webserver.Response[webserver.BackupResultData], error) {
+ scheme := remote.GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
+ req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true)
+}
+
+// Start runs the pg_start_backup
+func (c *backupClientImpl) Start(ctx context.Context, pod *corev1.Pod, sbq webserver.StartBackupRequest) error {
+ scheme := remote.GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
+
+ // Marshalling the payload to JSON
+ jsonBody, err := json.Marshal(sbq)
+ if err != nil {
+ return fmt.Errorf("failed to marshal start payload: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ _, err = executeRequestWithError[struct{}](ctx, c.cli, req, false)
+ return err
+}
+
+// Stop runs the command pg_stop_backup
+func (c *backupClientImpl) Stop(ctx context.Context, pod *corev1.Pod, sbq webserver.StopBackupRequest) error {
+ scheme := remote.GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
+ // Marshalling the payload to JSON
+ jsonBody, err := json.Marshal(sbq)
+ if err != nil {
+ return fmt.Errorf("failed to marshal stop payload: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody))
+ if err != nil {
+ return err
+ }
+ _, err = executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, false)
+ return err
+}
diff --git a/internal/management/cache/client/client.go b/pkg/management/postgres/webserver/client/local/cache.go
similarity index 75%
rename from internal/management/cache/client/client.go
rename to pkg/management/postgres/webserver/client/local/cache.go
index 4c3486c579..a950018cbc 100644
--- a/internal/management/cache/client/client.go
+++ b/pkg/management/postgres/webserver/client/local/cache.go
@@ -14,9 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package client contains the constants and functions for reading supported objects from cache
-// or building them in case of cache miss.
-package client
+package local
import (
"encoding/json"
@@ -32,9 +30,19 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
)
+// CacheClient is the interface to interact with the cache endpoints
+type CacheClient interface {
+ GetCluster() (*apiv1.Cluster, error)
+ GetEnv(key string) ([]string, error)
+}
+
+type cacheClientImpl struct {
+ cli *http.Client
+}
+
// GetCluster gets the required cluster from cache
-func GetCluster() (*apiv1.Cluster, error) {
- bytes, err := httpCacheGet(cache.ClusterKey)
+func (c *cacheClientImpl) GetCluster() (*apiv1.Cluster, error) {
+ bytes, err := c.httpCacheGet(cache.ClusterKey)
if err != nil {
return nil, err
}
@@ -49,8 +57,8 @@ func GetCluster() (*apiv1.Cluster, error) {
}
// GetEnv gets the environment variables from cache
-func GetEnv(key string) ([]string, error) {
- bytes, err := httpCacheGet(key)
+func (c *cacheClientImpl) GetEnv(key string) ([]string, error) {
+ bytes, err := c.httpCacheGet(key)
if err != nil {
return nil, err
}
@@ -66,11 +74,11 @@ func GetEnv(key string) ([]string, error) {
// httpCacheGet retrieves an object from the cache.
// In case of failures it retries for a while before giving up
-func httpCacheGet(urlPath string) ([]byte, error) {
+func (c *cacheClientImpl) httpCacheGet(urlPath string) ([]byte, error) {
var bytes []byte
err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
var err error
- bytes, err = get(urlPath)
+ bytes, err = c.get(urlPath)
return err
})
if err != nil {
@@ -80,8 +88,8 @@ func httpCacheGet(urlPath string) ([]byte, error) {
return bytes, nil
}
-func get(urlPath string) ([]byte, error) {
- resp, err := http.Get(url.Local(url.PathCache+urlPath, url.LocalPort))
+func (c *cacheClientImpl) get(urlPath string) ([]byte, error) {
+ resp, err := c.cli.Get(url.Local(url.PathCache+urlPath, url.LocalPort))
if err != nil {
return nil, err
}
diff --git a/pkg/management/postgres/webserver/client/local/cluster.go b/pkg/management/postgres/webserver/client/local/cluster.go
new file mode 100644
index 0000000000..d1229d4f55
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/cluster.go
@@ -0,0 +1,71 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package local
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+)
+
+// ClusterClient is the interface to interact with the uncategorized endpoints
+type ClusterClient interface {
+ // SetWALArchiveStatusCondition sets the wal-archive status condition.
+ // An empty errMessage means that the archive process was successful.
+ // Returns any error encountered during the request.
+ SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error
+}
+
+// clusterClientImpl a client to interact with the uncategorized endpoints
+type clusterClientImpl struct {
+ cli *http.Client
+}
+
+func (c *clusterClientImpl) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error {
+ contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition)
+
+ asr := webserver.ArchiveStatusRequest{
+ Error: errMessage,
+ }
+
+ encoded, err := json.Marshal(&asr)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.cli.Post(
+ url.Local(url.PathWALArchiveStatusCondition, url.LocalPort),
+ "application/json",
+ bytes.NewBuffer(encoded),
+ )
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if errClose := resp.Body.Close(); errClose != nil {
+ contextLogger.Error(errClose, "while closing response body")
+ }
+ }()
+
+ return nil
+}
diff --git a/pkg/management/postgres/webserver/client/local/doc.go b/pkg/management/postgres/webserver/client/local/doc.go
new file mode 100644
index 0000000000..1fdc0bca97
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package local provides a client to interact with the local webserver endpoints.
+package local
diff --git a/pkg/management/postgres/webserver/client/local/local.go b/pkg/management/postgres/webserver/client/local/local.go
new file mode 100644
index 0000000000..5b91d3e41a
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/local.go
@@ -0,0 +1,62 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package local
+
+import (
+ "time"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common"
+)
+
+// Client is an entity capable of interacting with the local webserver endpoints
+type Client interface {
+ Backup() BackupClient
+ Cache() CacheClient
+ Cluster() ClusterClient
+}
+
+type localClient struct {
+ backup BackupClient
+ cache CacheClient
+ cluster ClusterClient
+}
+
+// NewClient returns a new instance of Client
+func NewClient() Client {
+ const connectionTimeout = 2 * time.Second
+ const requestTimeout = 30 * time.Second
+
+ standardClient := common.NewHTTPClient(connectionTimeout, requestTimeout)
+
+ return &localClient{
+ backup: &backupClientImpl{cli: standardClient},
+ cache: &cacheClientImpl{cli: standardClient},
+ cluster: &clusterClientImpl{cli: standardClient},
+ }
+}
+
+func (c *localClient) Backup() BackupClient {
+ return c.backup
+}
+
+func (c *localClient) Cache() CacheClient {
+ return c.cache
+}
+
+func (c *localClient) Cluster() ClusterClient {
+ return c.cluster
+}
diff --git a/pkg/management/postgres/webserver/client/local/request.go b/pkg/management/postgres/webserver/client/local/request.go
new file mode 100644
index 0000000000..efc3a2c7c5
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/request.go
@@ -0,0 +1,69 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package local
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+)
+
+func executeRequestWithError[T any](
+ ctx context.Context,
+ cli *http.Client,
+ req *http.Request,
+ ignoreBodyErrors bool,
+) (*webserver.Response[T], error) {
+ contextLogger := log.FromContext(ctx)
+
+ resp, err := cli.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("while executing http request: %w", err)
+ }
+
+ defer func() {
+ if err := resp.Body.Close(); err != nil {
+ contextLogger.Error(err, "while closing response body")
+ }
+ }()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("while reading the response body: %w", err)
+ }
+
+ if resp.StatusCode == http.StatusInternalServerError {
+ return nil, fmt.Errorf("encountered an internal server error status code 500 with body: %s", string(body))
+ }
+
+ var result webserver.Response[T]
+ if err := json.Unmarshal(body, &result); err != nil {
+ return nil, fmt.Errorf("while unmarshalling the body, body: %s err: %w", string(body), err)
+ }
+ if result.Error != nil && !ignoreBodyErrors {
+ return nil, fmt.Errorf("body contained an error code: %s and message: %s",
+ result.Error.Code, result.Error.Message)
+ }
+
+ return &result, nil
+}
diff --git a/pkg/management/postgres/webserver/client/remote/doc.go b/pkg/management/postgres/webserver/client/remote/doc.go
new file mode 100644
index 0000000000..f5a83f43e9
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/remote/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package remote contains the client capable of querying the webserver remote endpoint.
+package remote
diff --git a/pkg/resources/instance/client.go b/pkg/management/postgres/webserver/client/remote/instance.go
similarity index 91%
rename from pkg/resources/instance/client.go
rename to pkg/management/postgres/webserver/client/remote/instance.go
index 1c80752e69..b83111f850 100644
--- a/pkg/resources/instance/client.go
+++ b/pkg/management/postgres/webserver/client/remote/instance.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package instance
+package remote
import (
"context"
@@ -34,9 +34,9 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -55,8 +55,8 @@ var requestRetry = wait.Backoff{
Jitter: 0.1,
}
-// Client a http client capable of querying the instance HTTP endpoints
-type Client interface {
+// InstanceClient a http client capable of querying the instance HTTP endpoints
+type InstanceClient interface {
// GetStatusFromInstances gets the replication status from the PostgreSQL instances,
// the returned list is sorted in order to have the primary as the first element
// and the other instances in their election order
@@ -83,7 +83,7 @@ type Client interface {
ArchivePartialWAL(context.Context, *corev1.Pod) (string, error)
}
-type statusClient struct {
+type instanceClientImpl struct {
*http.Client
}
@@ -97,18 +97,18 @@ func (i StatusError) Error() string {
return fmt.Sprintf("error status code: %v, body: %v", i.StatusCode, i.Body)
}
-// NewStatusClient returns a client capable of querying the instance HTTP endpoints
-func NewStatusClient() Client {
+// newInstanceClient returns a client capable of querying the instance HTTP endpoints
+func newInstanceClient() InstanceClient {
const connectionTimeout = 2 * time.Second
const requestTimeout = 10 * time.Second
- return &statusClient{Client: resources.NewHTTPClient(connectionTimeout, requestTimeout)}
+ return &instanceClientImpl{Client: common.NewHTTPClient(connectionTimeout, requestTimeout)}
}
// extractInstancesStatus extracts the status of the underlying PostgreSQL instance from
// the requested Pod, via the instance manager. In case of failure, errors are passed
// in the result list
-func (r statusClient) extractInstancesStatus(
+func (r instanceClientImpl) extractInstancesStatus(
ctx context.Context,
activePods []corev1.Pod,
) postgres.PostgresqlStatusList {
@@ -123,7 +123,7 @@ func (r statusClient) extractInstancesStatus(
// getReplicaStatusFromPodViaHTTP retrieves the status of PostgreSQL pod via HTTP, retrying
// the request if some communication error is encountered
-func (r *statusClient) getReplicaStatusFromPodViaHTTP(
+func (r *instanceClientImpl) getReplicaStatusFromPodViaHTTP(
ctx context.Context,
pod corev1.Pod,
) (result postgres.PostgresqlStatus) {
@@ -161,7 +161,7 @@ func (r *statusClient) getReplicaStatusFromPodViaHTTP(
return result
}
-func (r *statusClient) GetStatusFromInstances(
+func (r *instanceClientImpl) GetStatusFromInstances(
ctx context.Context,
pods corev1.PodList,
) postgres.PostgresqlStatusList {
@@ -184,7 +184,7 @@ func (r *statusClient) GetStatusFromInstances(
return status
}
-func (r *statusClient) GetPgControlDataFromInstance(
+func (r *instanceClientImpl) GetPgControlDataFromInstance(
ctx context.Context,
pod *corev1.Pod,
) (string, error) {
@@ -231,7 +231,7 @@ func (r *statusClient) GetPgControlDataFromInstance(
}
// UpgradeInstanceManager upgrades the instance manager to the passed availableArchitecture
-func (r *statusClient) UpgradeInstanceManager(
+func (r *instanceClientImpl) UpgradeInstanceManager(
ctx context.Context,
pod *corev1.Pod,
availableArchitecture *utils.AvailableArchitecture,
@@ -293,7 +293,7 @@ func isEOF(err error) bool {
}
// rawInstanceStatusRequest retrieves the status of PostgreSQL pods via an HTTP request with GET method.
-func (r *statusClient) rawInstanceStatusRequest(
+func (r *instanceClientImpl) rawInstanceStatusRequest(
ctx context.Context,
pod corev1.Pod,
) (result postgres.PostgresqlStatus) {
@@ -376,7 +376,7 @@ func GetStatusSchemeFromPod(pod *corev1.Pod) HTTPScheme {
return schemeHTTP
}
-func (r *statusClient) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) (string, error) {
+func (r *instanceClientImpl) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) (string, error) {
contextLogger := log.FromContext(ctx)
statusURL := url.Build(
diff --git a/pkg/management/postgres/webserver/client/remote/remote.go b/pkg/management/postgres/webserver/client/remote/remote.go
new file mode 100644
index 0000000000..2b6a375e0e
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/remote/remote.go
@@ -0,0 +1,37 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remote
+
+// Client is the interface to interact with the remote webserver
+type Client interface {
+ Instance() InstanceClient
+}
+
+type remoteClientImpl struct {
+ instance InstanceClient
+}
+
+func (r *remoteClientImpl) Instance() InstanceClient {
+ return r.instance
+}
+
+// NewClient creates a new remote client
+func NewClient() Client {
+ return &remoteClientImpl{
+ instance: newInstanceClient(),
+ }
+}
diff --git a/pkg/management/postgres/webserver/local_client.go b/pkg/management/postgres/webserver/local_client.go
deleted file mode 100644
index 4f2ec068f5..0000000000
--- a/pkg/management/postgres/webserver/local_client.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package webserver
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "time"
-
- "github.com/cloudnative-pg/machinery/pkg/log"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
-)
-
-// LocalClient is an entity capable of interacting with the local webserver endpoints
-type LocalClient interface {
- // SetWALArchiveStatusCondition sets the wal-archive status condition.
- // An empty errMessage means that the archive process was successful.
- // Returns any error encountered during the request.
- SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error
-}
-
-type localClient struct {
- cli *http.Client
-}
-
-// NewLocalClient returns a new instance of LocalClient
-func NewLocalClient() LocalClient {
- const connectionTimeout = 2 * time.Second
- const requestTimeout = 30 * time.Second
-
- return &localClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)}
-}
-
-func (c *localClient) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error {
- contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition)
-
- asr := ArchiveStatusRequest{
- Error: errMessage,
- }
-
- encoded, err := json.Marshal(&asr)
- if err != nil {
- return err
- }
-
- resp, err := http.Post(
- url.Local(url.PathWALArchiveStatusCondition, url.LocalPort),
- "application/json",
- bytes.NewBuffer(encoded),
- )
- if err != nil {
- return err
- }
- defer func() {
- if errClose := resp.Body.Close(); errClose != nil {
- contextLogger.Error(err, "while closing response body")
- }
- }()
-
- return nil
-}
diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go
index 64f469f2bb..1ae00b82d1 100644
--- a/pkg/management/postgres/webserver/metricserver/pg_collector.go
+++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go
@@ -29,9 +29,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
- cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
m "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
postgresconf "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
)
@@ -92,7 +92,7 @@ func NewExporter(instance *postgres.Instance) *Exporter {
return &Exporter{
instance: instance,
Metrics: newMetrics(),
- getCluster: cacheClient.GetCluster,
+ getCluster: local.NewClient().Cache().GetCluster,
}
}
diff --git a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go
index 50280a21cc..47a502a6d8 100644
--- a/pkg/management/postgres/webserver/metricserver/wal.go
+++ b/pkg/management/postgres/webserver/metricserver/wal.go
@@ -24,8 +24,8 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
- cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -220,7 +220,7 @@ func collectPGWalSettings(exporter *Exporter, db *sql.DB) error {
}
func getWalVolumeSize() float64 {
- cluster, err := cacheClient.GetCluster()
+ cluster, err := local.NewClient().Cache().GetCluster()
if err != nil || !cluster.ShouldCreateWalArchiveVolume() {
return 0
}
diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go
index e1d74097a1..62de9f4bb3 100644
--- a/pkg/reconciler/backup/volumesnapshot/online.go
+++ b/pkg/reconciler/backup/volumesnapshot/online.go
@@ -26,14 +26,15 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
)
type onlineExecutor struct {
- backupClient webserver.BackupClient
+ backupClient local.BackupClient
}
func newOnlineExecutor() *onlineExecutor {
- return &onlineExecutor{backupClient: webserver.NewBackupClient()}
+ return &onlineExecutor{backupClient: local.NewClient().Backup()}
}
func (o *onlineExecutor) finalize(
diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go
index 4d2d633ac4..730fa8f241 100644
--- a/pkg/reconciler/backup/volumesnapshot/reconciler.go
+++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go
@@ -35,8 +35,8 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -44,7 +44,7 @@ import (
type Reconciler struct {
cli client.Client
recorder record.EventRecorder
- instanceStatusClient instance.Client
+ instanceStatusClient remote.InstanceClient
}
// ExecutorBuilder is a struct capable of creating a Reconciler
@@ -61,7 +61,7 @@ func NewReconcilerBuilder(
executor: Reconciler{
cli: cli,
recorder: recorder,
- instanceStatusClient: instance.NewStatusClient(),
+ instanceStatusClient: remote.NewClient().Instance(),
},
}
}
diff --git a/pkg/reconciler/replicaclusterswitch/reconciler.go b/pkg/reconciler/replicaclusterswitch/reconciler.go
index afb00515fb..15342e9adc 100644
--- a/pkg/reconciler/replicaclusterswitch/reconciler.go
+++ b/pkg/reconciler/replicaclusterswitch/reconciler.go
@@ -29,8 +29,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -39,7 +39,7 @@ func Reconcile(
ctx context.Context,
cli client.Client,
cluster *apiv1.Cluster,
- instanceClient instance.Client,
+ instanceClient remote.InstanceClient,
instances postgres.PostgresqlStatusList,
) (*ctrl.Result, error) {
if !cluster.IsReplica() {
@@ -150,7 +150,7 @@ func reconcileDemotionToken(
ctx context.Context,
cli client.Client,
cluster *apiv1.Cluster,
- instanceClient instance.Client,
+ instanceClient remote.InstanceClient,
instances postgres.PostgresqlStatusList,
) (*ctrl.Result, error) {
contextLogger := log.FromContext(ctx).WithName("replica_cluster")
diff --git a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go
index 0af100575c..9b7413bf30 100644
--- a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go
+++ b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go
@@ -23,8 +23,8 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -38,7 +38,7 @@ var errPostgresNotShutDown = fmt.Errorf("expected postmaster to be shut down")
func generateDemotionToken(
ctx context.Context,
cluster *apiv1.Cluster,
- instanceClient instance.Client,
+ instanceClient remote.InstanceClient,
instancesStatus postgres.PostgresqlStatusList,
) (string, error) {
contextLogger := log.FromContext(ctx).WithName("shutdown_checkpoint")
From caacdbab0f75530599f3bc1efd6fcf8aa597cfa0 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Sat, 7 Dec 2024 10:36:23 +0100
Subject: [PATCH 225/836] docs: release notes for 1.25.0-rc1 (#6275)
Closes #6228
Signed-off-by: Gabriele Bartolini
Co-authored-by: Marco Nenciarini
---
docs/src/preview_version.md | 2 -
docs/src/release_notes.md | 1 +
docs/src/release_notes/v1.25.md | 70 +++++++++++++++++++++++++++++++++
docs/src/supported_releases.md | 12 +++---
4 files changed, 77 insertions(+), 8 deletions(-)
create mode 100644 docs/src/release_notes/v1.25.md
diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md
index 1331deff22..8f354f67ae 100644
--- a/docs/src/preview_version.md
+++ b/docs/src/preview_version.md
@@ -35,12 +35,10 @@ are not backwards compatible and could be removed entirely.
There are currently no preview versions available.
-
diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md
index f10bd1cd6d..fe2a723507 100644
--- a/docs/src/release_notes.md
+++ b/docs/src/release_notes.md
@@ -2,6 +2,7 @@
History of user-visible changes for CloudNativePG, classified for each minor release.
+- [CloudNativePG 1.25 - Release Candidate](release_notes/v1.25.md)
- [CloudNativePG 1.24](release_notes/v1.24.md)
- [CloudNativePG 1.23](release_notes/v1.23.md)
diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md
new file mode 100644
index 0000000000..4996532171
--- /dev/null
+++ b/docs/src/release_notes/v1.25.md
@@ -0,0 +1,70 @@
+# Release notes for CloudNativePG 1.25
+
+History of user-visible changes in the 1.25 minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25)
+on the release branch in GitHub.
+
+## Version 1.25.0-rc1
+
+**Release Date:** December 9, 2024
+
+### Features
+
+- **Declarative Database Management**: Introduce the `Database` Custom Resource
+ Definition (CRD), enabling users to create and manage PostgreSQL databases
+ declaratively within a cluster. (#5325)
+
+- **Logical Replication Management**: Add `Publication` and `Subscription` CRDs
+ for declarative management of PostgreSQL logical replication. These simplify
+ replication setup and facilitate online migrations to CloudNativePG. (#5329)
+
+### Enhancements
+
+- Add the `dataDurability` option to the `.spec.postgresql.synchronous` stanza,
+ allowing users to choose between `required` (default) or `preferred`
+ durability in synchronous replication. (#5878)
+- Enable customization of startup, liveness, and readiness probes through the
+ `.spec.probes` stanza. (#6266)
+- Support additional `pg_dump` and `pg_restore` options to enhance database
+ import flexibility. (#6214)
+- Add support for `maxConcurrentReconciles` in the CloudNativePG controller and
+ set the default to 10, improving the operator's ability to efficiently manage
+ larger deployments out of the box. (#5678)
+- Add the `cnpg.io/userType` label to secrets generated for predefined users,
+ specifically `superuser` and `app`. (#4392)
+- `cnpg` plugin:
+ - Enhance the `backup` command to support plugins. (#6045)
+ - Honor the `User-Agent` header in HTTP requests with the API server. (#6153)
+
+### Bug Fixes
+
+- Ensure the former primary flushes its WAL file queue to the archive before
+ re-synchronizing as a replica, reducing recovery times and enhancing data
+ consistency during failovers. (#6141)
+- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265)
+- Update the operator to set the cluster phase to `Unrecoverable` when
+ all previously generated `PersistentVolumeClaims` are missing. (#6170)
+- Fix the parsing of the `synchronous_standby_names` GUC when
+ `.spec.postgresql.synchronous.method` is set to `first`. (#5955)
+- Correct role changes to apply at the transaction level instead of the
+ database context. (#6064)
+- Remove the `primary_slot_name` definition from the `override.conf` file on
+ the primary to ensure it is always empty. (#6219)
+- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods
+ to enable seamless access to the `pgbouncer` virtual database using `psql`
+ from within the container. (#6247)
+- Remove unnecessary updates to the Cluster status when verifying changes in
+ the image catalog. (#6277)
+- `cnpg` plugin:
+ - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257)
+ - Avoid displaying the physical backups block in the `status` command output when it is empty. (#5998)
+
+### Supported Versions
+
+- **Kubernetes**: 1.31, 1.30, and 1.29
+- **PostgreSQL**: 17, 16, 15, 14, and 13
+ - Default image: PostgreSQL 17.2
+ - Officially dropped support for PostgreSQL 12
+ - PostgreSQL 13 support ends on November 12, 2025
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index 9717ee5cc0..afdffb9ebd 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -80,11 +80,11 @@ Git tags for versions are prefixed with `v`.
## Support status of CloudNativePG releases
-| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
-|-----------------|----------------------|---------------------|---------------------|-------------------------------|---------------------------|-----------------------------|
-| 1.25.x | Yes | December XX, 2024 | ~ February, 2025 | 1.29, 1.30, 1.31, 1.32 (??) | 1.27, 1.28 | 13 - 17 |
-| 1.24.x | Yes | August 22, 2024 | February XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
-| main | No, development only | | | | | 13 - 17 |
+| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
+|-----------------|----------------------|----------------|---------------------|-------------------------------|---------------------------|-----------------------------|
+| 1.25.x | No (RC) | Dec XX, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 (!) | 1.27, 1.28 | 13 - 17 |
+| 1.24.x | Yes | Aug 22, 2024 | Feb XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
+| main | No, development only | | | | | 13 - 17 |
@@ -121,7 +121,7 @@ version of PostgreSQL, we might not be able to help you.
| Version | Release date | End of life |
|-----------------|-----------------------|---------------------------|
-| 1.25.0 | Nov/Dec, 2024 | May/Jun, 2025 |
+| 1.25.0 | Dec, 2024 | May/Jun, 2025 |
| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 |
| 1.27.0 | Jun, 2025 | Dec, 2025 |
From 480f80593e922d01be27be25e71db8992c57bc9a Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Sat, 7 Dec 2024 13:49:33 +0100
Subject: [PATCH 226/836] docs(security): mention new CRDs (#6296)
Mention `Database`, `Publication`, and `Subscription` CRDs in the
security page.
Closes #6241
Signed-off-by: Gabriele Bartolini
---
docs/src/security.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/src/security.md b/docs/src/security.md
index 6eab222826..ec14f35d46 100644
--- a/docs/src/security.md
+++ b/docs/src/security.md
@@ -113,8 +113,8 @@ more about these roles, you can use the `kubectl describe clusterrole` or
The above permissions are exclusively reserved for the operator's service
account to interact with the Kubernetes API server. They are not directly
accessible by the users of the operator that interact only with `Cluster`,
- `Pooler`, `Backup`, `ScheduledBackup`, `ImageCatalog` and
- `ClusterImageCatalog` resources.
+ `Pooler`, `Backup`, `ScheduledBackup`, `Database`, `Publication`,
+ `Subscription`, `ImageCatalog` and `ClusterImageCatalog` resources.
Below we provide some examples and, most importantly, the reasons why
CloudNativePG requires full or partial management of standard Kubernetes
From 83222ae319d4a4e3e79568831a0358d0f2e4696a Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Mon, 9 Dec 2024 11:08:23 +0100
Subject: [PATCH 227/836] Version tag to 1.25.0-rc1 (#6299)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Niccolò Fei
Co-authored-by: Niccolò Fei
---
docs/src/installation_upgrade.md | 6 +-
docs/src/kubectl-plugin.md | 30 +-
pkg/versions/versions.go | 6 +-
releases/cnpg-1.25.0-rc1.yaml | 17645 +++++++++++++++++++++++++++++
4 files changed, 17666 insertions(+), 21 deletions(-)
create mode 100644 releases/cnpg-1.25.0-rc1.yaml
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index c948a61148..182ae94d44 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -7,12 +7,12 @@
The operator can be installed like any other resource in Kubernetes,
through a YAML manifest applied via `kubectl`.
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml)
+You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml)
for this minor release as follows:
```sh
kubectl apply --server-side -f \
- https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml
+ https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml
```
You can verify that with:
@@ -72,7 +72,7 @@ specific minor release, you can just run:
```sh
curl -sSfL \
- https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.24/manifests/operator-manifest.yaml | \
+ https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.25/manifests/operator-manifest.yaml | \
kubectl apply --server-side -f -
```
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index 2b430dd9e0..45801e223c 100644
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -30,11 +30,11 @@ them in your systems.
#### Debian packages
-For example, let's install the 1.24.1 release of the plugin, for an Intel based
+For example, let's install the 1.25.0-rc1 release of the plugin, for an Intel based
64 bit server. First, we download the right `.deb` file.
```sh
-wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.deb \
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.deb \
--output-document kube-plugin.deb
```
@@ -45,17 +45,17 @@ $ sudo dpkg -i kube-plugin.deb
Selecting previously unselected package cnpg.
(Reading database ... 6688 files and directories currently installed.)
Preparing to unpack kube-plugin.deb ...
-Unpacking cnpg (1.24.1) ...
-Setting up cnpg (1.24.1) ...
+Unpacking cnpg (1.25.0-rc1) ...
+Setting up cnpg (1.25.0-rc1) ...
```
#### RPM packages
-As in the example for `.rpm` packages, let's install the 1.24.1 release for an
+As in the example for `.rpm` packages, let's install the 1.25.0-rc1 release for an
Intel 64 bit machine. Note the `--output` flag to provide a file name.
```sh
-curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.rpm \
+curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.rpm \
--output kube-plugin.rpm
```
@@ -69,7 +69,7 @@ Dependencies resolved.
Package Architecture Version Repository Size
====================================================================================================
Installing:
- cnpg x86_64 1.24.1-1 @commandline 20 M
+ cnpg x86_64 1.25.0-rc1-1 @commandline 20 M
Transaction Summary
====================================================================================================
@@ -277,9 +277,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00
Instances status
Name Current LSN Replication role Status QoS Manager Version Node
---- ----------- ---------------- ------ --- --------------- ----
-sandbox-1 0/604DE38 Primary OK BestEffort 1.24.1 k8s-eu-worker
-sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2
-sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker
+sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker
+sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2
+sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker
```
If you require more detailed status information, use the `--verbose` option (or
@@ -333,9 +333,9 @@ sandbox-primary primary 1 1 1
Instances status
Name Current LSN Replication role Status QoS Manager Version Node
---- ----------- ---------------- ------ --- --------------- ----
-sandbox-1 0/6053720 Primary OK BestEffort 1.24.1 k8s-eu-worker
-sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2
-sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker
+sandbox-1 0/6053720 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker
+sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2
+sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker
```
With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can
@@ -558,12 +558,12 @@ Archive: report_operator_.zip
```output
====== Begin of Previous Log =====
-2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
====== End of Previous Log =====
-2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
```
diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go
index 6189bdad1f..91f8dcc30e 100644
--- a/pkg/versions/versions.go
+++ b/pkg/versions/versions.go
@@ -20,13 +20,13 @@ package versions
const (
// Version is the version of the operator
- Version = "1.24.1"
+ Version = "1.25.0-rc1"
// DefaultImageName is the default image used by the operator to create pods
DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2"
// DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL
- DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1"
+ DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1"
)
// BuildInfo is a struct containing all the info about the build
@@ -36,7 +36,7 @@ type BuildInfo struct {
var (
// buildVersion injected during the build
- buildVersion = "1.24.1"
+ buildVersion = "1.25.0-rc1"
// buildCommit injected during the build
buildCommit = "none"
diff --git a/releases/cnpg-1.25.0-rc1.yaml b/releases/cnpg-1.25.0-rc1.yaml
new file mode 100644
index 0000000000..d69fecaf08
--- /dev/null
+++ b/releases/cnpg-1.25.0-rc1.yaml
@@ -0,0 +1,17645 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-system
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: backups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Backup
+ listKind: BackupList
+ plural: backups
+ singular: backup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.method
+ name: Method
+ type: string
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.error
+ name: Error
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Backup is the Schema for the backups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the backup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used; possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the backup. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use Azure AD based authentication without explicitly
+ providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ backupId:
+ description: The ID of the Barman backup
+ type: string
+ backupLabelFile:
+ description: Backup label file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ backupName:
+ description: The Name of the Barman backup
+ type: string
+ beginLSN:
+ description: The starting xlog
+ type: string
+ beginWal:
+ description: The starting WAL
+ type: string
+ commandError:
+ description: The backup command output in case of error
+ type: string
+ commandOutput:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ destinationPath:
+ description: |-
+ The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data. This may not be populated in case of errors.
+ type: string
+ encryption:
+ description: Encryption method required by the S3 API
+ type: string
+ endLSN:
+ description: The ending xlog
+ type: string
+ endWal:
+ description: The ending WAL
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ error:
+ description: The detected error
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google Cloud
+ Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage JSON
+ file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, it will presume that it's running inside a GKE
+ environment; defaults to false.
+ type: boolean
+ type: object
+ instanceID:
+ description: Information to identify the instance where the backup
+ has been taken from
+ properties:
+ ContainerID:
+ description: The container ID
+ type: string
+ podName:
+ description: The pod name
+ type: string
+ type: object
+ method:
+ description: The backup method being used
+ type: string
+ online:
+ description: Whether the backup was online/hot (`true`) or offline/cold
+ (`false`)
+ type: boolean
+ phase:
+ description: The last backup status
+ type: string
+ pluginMetadata:
+ additionalProperties:
+ type: string
+ description: A map containing the plugin metadata
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use role-based authentication without explicitly
+ providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the region
+ name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ snapshotBackupStatus:
+ description: Status of the volumeSnapshot backup
+ properties:
+ elements:
+ description: The elements list, populated with the gathered volume
+ snapshots
+ items:
+ description: BackupSnapshotElementStatus is a volume snapshot
+ that is part of a volume snapshot method backup
+ properties:
+ name:
+ description: Name is the snapshot resource name
+ type: string
+ tablespaceName:
+ description: |-
+ TablespaceName is the name of the snapshotted tablespace. Only set
+ when type is PG_TABLESPACE
+ type: string
+ type:
+ description: Type is the role of the snapshot in the cluster,
+ such as PG_DATA, PG_WAL and PG_TABLESPACE
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ type: array
+ type: object
+ startedAt:
+ description: When the backup was started
+ format: date-time
+ type: string
+ stoppedAt:
+ description: When the backup was terminated
+ format: date-time
+ type: string
+ tablespaceMapFile:
+ description: Tablespace map file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: clusterimagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ClusterImageCatalog
+ listKind: ClusterImageCatalogList
+ plural: clusterimagecatalogs
+ singular: clusterimagecatalog
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ClusterImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: clusters.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Cluster
+ listKind: ClusterList
+ plural: clusters
+ singular: cluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Number of instances
+ jsonPath: .status.instances
+ name: Instances
+ type: integer
+ - description: Number of ready instances
+ jsonPath: .status.readyInstances
+ name: Ready
+ type: integer
+ - description: Cluster current status
+ jsonPath: .status.phase
+ name: Status
+ type: string
+ - description: Primary pod
+ jsonPath: .status.currentPrimary
+ name: Primary
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Cluster is the Schema for the PostgreSQL API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the cluster.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ affinity:
+ description: Affinity/Anti-affinity rules for Pods
+ properties:
+ additionalPodAffinity:
+ description: AdditionalPodAffinity allows to specify pod affinity
+ terms to be passed to all the cluster's pods.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ additionalPodAntiAffinity:
+ description: |-
+ AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated
+ by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                          the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                              to select the group of existing pods to be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                              to select the group of existing pods to be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ enablePodAntiAffinity:
+ description: |-
+ Activates anti-affinity for the pods. The operator will define pods
+ anti-affinity unless this field is explicitly set to false
+ type: boolean
+ nodeAffinity:
+ description: |-
+ NodeAffinity describes node affinity scheduling rules for the pod.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+                            A null or empty node selector term matches no objects.
+                            Its requirements are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+                        NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+                        PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+                        considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+                        "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+                        added, if none of the existing nodes match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+                          the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+                        TopologyKey to use for anti-affinity configuration. See the Kubernetes
+                        documentation for more information.
+ type: string
+ type: object
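+              # Illustrative sketch (not part of the generated schema): a possible
+              # `.spec.affinity` stanza using the fields above; the node label and
+              # taint values are hypothetical.
+              #
+              #   affinity:
+              #     enablePodAntiAffinity: true
+              #     podAntiAffinityType: required
+              #     topologyKey: kubernetes.io/hostname
+              #     nodeSelector:
+              #       workload: postgres
+              #     tolerations:
+              #       - key: dedicated
+              #         operator: Equal
+              #         value: postgres
+              #         effect: NoSchedule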
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+                          description: Use the Azure AD based authentication without
+                            explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
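+                    # Illustrative sketch (not part of the generated schema): Azure
+                    # credentials referenced from a hypothetical `azure-creds` Secret.
+                    #
+                    #   azureCredentials:
+                    #     storageAccount:
+                    #       name: azure-creds
+                    #       key: STORAGE_ACCOUNT
+                    #     storageKey:
+                    #       name: azure-creds
+                    #       key: STORAGE_KEY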
+ data:
+ description: |-
+                        The configuration to be used to backup the data files.
+                        When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+                            Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
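+                    # Illustrative sketch (not part of the generated schema): a `data`
+                    # section compressing base backups and raising upload parallelism.
+                    #
+                    #   data:
+                    #     compression: gzip
+                    #     jobs: 4
+                    #     immediateCheckpoint: true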
+ destinationPath:
+ description: |-
+                        The path where to store the backup (i.e. s3://bucket/path/to/folder).
+                        This path, with different destination folders, will be used for WALs
+                        and for data.
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+                        EndpointCA stores the CA bundle of the barman endpoint.
+                        Useful when using self-signed certificates to avoid
+                        errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+                            If set to true, the operator will presume that it's running inside a GKE
+                            environment; defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+                        HistoryTags is a list of key-value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+                          description: Use the role-based authentication without
+                            explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+                          description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
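+                    # Illustrative sketch (not part of the generated schema): S3
+                    # credentials taken from a hypothetical `aws-creds` Secret.
+                    #
+                    #   s3Credentials:
+                    #     accessKeyId:
+                    #       name: aws-creds
+                    #       key: ACCESS_KEY_ID
+                    #     secretAccessKey:
+                    #       name: aws-creds
+                    #       key: ACCESS_SECRET_KEY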
+ serverName:
+ description: |-
+                        The server name on S3; the cluster name is used if this
+                        parameter is omitted.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+                        Tags is a list of key-value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+                            Whether to force the encryption of files (if the bucket is
+ Whenever to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
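+                  # Illustrative sketch (not part of the generated schema): a minimal
+                  # `barmanObjectStore` stanza; the bucket path is hypothetical.
+                  #
+                  #   barmanObjectStore:
+                  #     destinationPath: s3://my-bucket/backups
+                  #     s3Credentials:
+                  #       inheritFromIAMRole: true
+                  #     wal:
+                  #       compression: gzip
+                  #       maxParallel: 4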
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
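+                  # Illustrative sketch: `retentionPolicy: "30d"` keeps backups and
+                  # related WALs for thirty days, per the `XXu` pattern above.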
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+                        description: Annotations are key-value pairs that will be added
+                          to the .metadata.annotations of the snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+                        description: Labels are key-value pairs that will be added
+                          to the .metadata.labels of the snapshot resources.
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+                          Defaults to the PGDATA Snapshot Class, if set.
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
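+                  # Illustrative sketch (not part of the generated schema): an online
+                  # volume-snapshot configuration; the class name is hypothetical.
+                  #
+                  #   volumeSnapshot:
+                  #     className: csi-hostpath-snapclass
+                  #     online: true
+                  #     onlineConfiguration:
+                  #       immediateCheckpoint: true
+                  #       waitForArchive: true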
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ builtinLocale:
+ description: |-
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+                            database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
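+                    # Illustrative sketch (not part of the generated schema): a
+                    # microservice-type import from a hypothetical external cluster.
+                    #
+                    #   import:
+                    #     type: microservice
+                    #     databases:
+                    #       - app
+                    #     source:
+                    #       externalCluster: source-cluster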
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+                        please use the explicitly provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the application database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified
+ in their respective arrays.
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ postInitSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the `postgres`
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the `postgres` database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified
+ in their respective arrays.
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ postInitTemplateSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the `template1`
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitTemplateSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the `template1` database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified
+ in their respective arrays.
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ walSegmentSize:
+ description: |-
+ The value in megabytes (1 to 1024) to be passed to the `--wal-segsize`
+ option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
+ maximum: 1024
+ minimum: 1
+ type: integer
+ type: object
+ x-kubernetes-validations:
+ - message: builtinLocale is only available when localeProvider
+ is set to `builtin`
+ rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin'''
+ - message: icuLocale is only available when localeProvider is
+ set to `icu`
+ rule: '!has(self.icuLocale) || self.localeProvider == ''icu'''
+ - message: icuRules is only available when localeProvider is set
+ to `icu`
+ rule: '!has(self.icuRules) || self.localeProvider == ''icu'''
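+                  # Illustrative sketch (not part of the generated schema): an initdb
+                  # bootstrap consistent with the validation rules above (icuLocale
+                  # requires localeProvider to be `icu`).
+                  #
+                  #   initdb:
+                  #     database: app
+                  #     owner: app
+                  #     dataChecksums: true
+                  #     localeProvider: icu
+                  #     icuLocale: en
+                  #     walSegmentSize: 32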
+ pg_basebackup:
+ description: |-
+ Bootstrap the cluster taking a physical backup of another compatible
+ PostgreSQL instance
+ properties:
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+                      description: The name of the server from which to take the
+                        physical backup
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
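+                  # Illustrative sketch: bootstrapping from a physical backup of a
+                  # hypothetical external cluster named `source-cluster`.
+                  #
+                  #   pg_basebackup:
+                  #     source: source-cluster
+                  #     database: app
+                  #     owner: app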
+ recovery:
+ description: Bootstrap the cluster from a backup
+ properties:
+ backup:
+ description: |-
+ The backup object containing the physical base backup from which to
+ initiate the recovery procedure.
+ Mutually exclusive with `source` and `volumeSnapshots`.
+ properties:
+ endpointCA:
+ description: |-
+                            EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ recoveryTarget:
+ description: |-
+ By default, the recovery process applies all the available
+ WAL files in the archive (full recovery). However, you can also
+ end the recovery as soon as a consistent state is reached or
+ recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object,
+ as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+ More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+ properties:
+ backupID:
+ description: |-
+ The ID of the backup from which to start the recovery process.
+                            If empty (default), the operator will automatically detect the backup
+                            based on targetTime or targetLSN, if specified. Otherwise, the
+                            latest available backup in chronological order will be used.
+ type: string
+ exclusive:
+ description: |-
+ Set the target to be exclusive. If omitted, defaults to false, so that
+ in Postgres, `recovery_target_inclusive` will be true
+ type: boolean
+ targetImmediate:
+ description: End recovery as soon as a consistent state
+ is reached
+ type: boolean
+ targetLSN:
+ description: The target LSN (Log Sequence Number)
+ type: string
+ targetName:
+ description: |-
+ The target name (to be previously created
+ with `pg_create_restore_point`)
+ type: string
+ targetTLI:
+ description: The target timeline ("latest" or a positive
+ integer)
+ type: string
+ targetTime:
+ description: The target time as a timestamp in the RFC3339
+ standard
+ type: string
+ targetXID:
+ description: The target transaction ID
+ type: string
+ type: object
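+                    # Illustrative sketch: a point-in-time recovery target; the
+                    # RFC3339 timestamp shown is hypothetical.
+                    #
+                    #   recoveryTarget:
+                    #     targetTime: "2024-05-01T10:00:00Z"
+                    #     exclusive: false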
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: |-
+ The external cluster whose backup we will restore. This is also
+ used as the name of the folder under which the backup is stored,
+                        so it must be set to the name of the source cluster.
+ Mutually exclusive with `backup`.
+ type: string
+ volumeSnapshots:
+ description: |-
+ The static PVC data source(s) from which to initiate the
+ recovery procedure. Currently supporting `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources that map an existing
+ PVC group, compatible with CloudNativePG, and taken with
+                        a cold backup copy on a fenced Postgres instance (a limitation
+                        that will be removed in the future when online backup
+                        support is implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ description: Configuration of the storage for PostgreSQL
+ tablespaces
+ type: object
+ walStorage:
+ description: Configuration of the storage for PostgreSQL
+ WAL (Write-Ahead Log)
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - storage
+ type: object
+ type: object
+ type: object
+ certificates:
+ description: The configuration for the CA and related certificates
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
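+              # Illustrative sketch (not part of the generated schema): user-provided
+              # server certificates; the Secret names and DNS name are hypothetical.
+              #
+              #   certificates:
+              #     serverCASecret: my-server-ca
+              #     serverTLSSecret: my-server-tls
+              #     serverAltDNSNames:
+              #       - pg.example.com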
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+                            Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
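+              # Illustrative sketch: passing an environment variable to the instance
+              # pods; the timezone value is hypothetical.
+              #
+              #   env:
+              #     - name: TZ
+              #       value: Europe/Rome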
+ envFrom:
+ description: |-
+                    EnvFrom follows the EnvFrom format to pass environment variable
+                    sources to the pods, to be used by Env
+ items:
+                  description: EnvFromSource represents the source of a set of ConfigMaps
+                    or Secrets
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in
+ the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
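+              # Illustrative sketch: sourcing environment variables from a
+              # hypothetical ConfigMap named `extra-env`.
+              #
+              #   envFrom:
+              #     - configMapRef:
+              #         name: extra-env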
+ ephemeralVolumeSource:
+ description: EphemeralVolumeSource allows the user to configure the
+ source of ephemeral volumes.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+                        pod. The name of the PVC will be `<pod name>-<volume name>` where
+                        `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+                        meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to
+ consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
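+ # A minimal sketch of `ephemeralVolumeSource`, assuming a storage class
+ # named "standard" is available; the requested size is arbitrary:
+ #
+ #   spec:
+ #     ephemeralVolumeSource:
+ #       volumeClaimTemplate:
+ #         spec:
+ #           accessModes: ["ReadWriteOnce"]
+ #           storageClassName: standard
+ #           resources:
+ #             requests:
+ #               storage: 1Gi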
+ ephemeralVolumesSizeLimit:
+ description: |-
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ volumes
+ properties:
+ shm:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Shm is the size limit of the shared memory volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ temporaryData:
+ anyOf:
+ - type: integer
+ - type: string
+ description: TemporaryData is the size limit of the temporary
+ data volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
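+ # An illustrative use of `ephemeralVolumesSizeLimit`; the quantities are
+ # arbitrary examples:
+ #
+ #   spec:
+ #     ephemeralVolumesSizeLimit:
+ #       shm: 256Mi
+ #       temporaryData: 1Gi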
+ externalClusters:
+ description: The list of external clusters which are used in the configuration
+ items:
+ description: |-
+ ExternalCluster represents the connection parameters to an
+ external cluster which is used in the other sections of the configuration
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role-based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ connectionParameters:
+ additionalProperties:
+ type: string
+ description: The list of connection parameters, such as dbname,
+ host, username, etc.
+ type: object
+ name:
+ description: The server name, required
+ type: string
+ password:
+ description: |-
+ The reference to the password to be used to connect to the server.
+ If a password is provided, CloudNativePG creates a PostgreSQL
+ passfile at `/controller/external/NAME/pass` (where "NAME" is the
+ cluster's name). This passfile is automatically referenced in the
+ connection string when establishing a connection to the remote
+ PostgreSQL server from the current PostgreSQL `Cluster`. This ensures
+ secure and efficient password management for external clusters.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ plugin:
+ description: |-
+ The configuration of the plugin that is taking care
+ of WAL archiving and backups for this external cluster
+ properties:
+ enabled:
+ default: true
+ description: Enabled is true if this plugin will be used
+ type: boolean
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ sslCert:
+ description: |-
+ The reference to an SSL certificate to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslKey:
+ description: |-
+ The reference to an SSL private key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
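+ # A hedged sketch of an `externalClusters` entry combining the fields above;
+ # the server name, host, and secret names are hypothetical:
+ #
+ #   spec:
+ #     externalClusters:
+ #       - name: cluster-origin
+ #         connectionParameters:
+ #           host: origin.example.com
+ #           user: streaming_replica
+ #           dbname: postgres
+ #         password:
+ #           name: origin-credentials
+ #           key: password
+ #         barmanObjectStore:
+ #           destinationPath: s3://backups/origin
+ #           s3Credentials:
+ #             accessKeyId:
+ #               name: aws-creds
+ #               key: ACCESS_KEY_ID
+ #             secretAccessKey:
+ #               name: aws-creds
+ #               key: ACCESS_SECRET_KEY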
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageCatalogRef:
+ description: Defines the major PostgreSQL version we want to use within
+ an ImageCatalog
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ major:
+ description: The major version of PostgreSQL we want to use from
+ the ImageCatalog
+ type: integer
+ x-kubernetes-validations:
+ - message: Major is immutable
+ rule: self == oldSelf
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - major
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: Only image catalogs are supported
+ rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+ - message: Only image catalogs are supported
+ rule: self.apiGroup == 'postgresql.cnpg.io'
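+ # A minimal sketch of `imageCatalogRef`, assuming an ImageCatalog named
+ # "postgresql" exists in the same namespace; the major version is arbitrary:
+ #
+ #   spec:
+ #     imageCatalogRef:
+ #       apiGroup: postgresql.cnpg.io
+ #       kind: ImageCatalog
+ #       name: postgresql
+ #       major: 16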
+ imageName:
+ description: |-
+ Name of the container image, supporting both tags (`<image>:<tag>`)
+ and digests for deterministic and repeatable deployments
+ (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
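+ # An illustrative combination of the image-related fields above; the image
+ # tag and pull-secret name are hypothetical:
+ #
+ #   spec:
+ #     imageName: ghcr.io/cloudnative-pg/postgresql:16.2
+ #     imagePullPolicy: IfNotPresent
+ #     imagePullSecrets:
+ #       - name: registry-credentials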
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
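+ # A short sketch of `inheritedMetadata`; the label and annotation keys are
+ # arbitrary examples:
+ #
+ #   spec:
+ #     inheritedMetadata:
+ #       labels:
+ #         environment: production
+ #       annotations:
+ #         example.com/owner: dba-team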
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbeTimeout / 10).
+ format: int32
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
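+ # A hedged sketch of a managed role using the fields above; the role name
+ # and password secret are hypothetical:
+ #
+ #   spec:
+ #     managed:
+ #       roles:
+ #         - name: app_reader
+ #           ensure: present
+ #           login: true
+ #           connectionLimit: 10
+ #           inRoles:
+ #             - pg_read_all_data
+ #           passwordSecret:
+ #             name: app-reader-password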
+ services:
+ description: Services roles managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, the requested family is available in the cluster,
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed
+ by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains
+ the configurations of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations
+ of Client IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+ Default value is 10800(for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic is
+ distributed to Service endpoints. Implementations can use this field as a
+ hint, but are not required to guarantee strict adherence. If the field is
+ not set, the implementation will apply its default routing strategy. If set
+ to "PreferClose", implementations should prioritize endpoints that are
+ topologically close (e.g., same zone).
+ This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
+ updateStrategy:
+ default: patch
+ description: UpdateStrategy describes how the service
+ differences should be reconciled
+ enum:
+ - patch
+ - replace
+ type: string
+ required:
+ - selectorType
+ - serviceTemplate
+ type: object
+ type: array
+ disabledDefaultServices:
+ description: |-
+ DisabledDefaultServices is a list of the default service types that
+ the operator should not create.
+ Valid values are "r" and "ro", representing the read and read-only services.
+ items:
+ description: |-
+ ServiceSelectorType describes a valid value for generating the service selectors.
+ It indicates which type of service the selector applies to, such as read-write, read, or read-only
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ type: array
+ type: object
+ type: object
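+ # A sketch of `managed.services`, assuming a LoadBalancer implementation is
+ # available in the cluster; the service name is hypothetical:
+ #
+ #   spec:
+ #     managed:
+ #       services:
+ #         disabledDefaultServices: ["ro"]
+ #         additional:
+ #           - selectorType: rw
+ #             updateStrategy: patch
+ #             serviceTemplate:
+ #               metadata:
+ #                 name: cluster-example-rw-lb
+ #               spec:
+ #                 type: LoadBalancer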
+ maxSyncReplicas:
+ default: 0
+ description: |-
+ The target value for the synchronous replication quorum, which can be
+ decreased if the number of ready standbys is lower than this.
+ Undefined or 0 disables synchronous replication.
+ minimum: 0
+ type: integer
+ minSyncReplicas:
+ default: 0
+ description: |-
+ Minimum number of instances required in synchronous replication with the
+ primary. Undefined or 0 allows writes to complete when no standby is
+ available.
+ minimum: 0
+ type: integer
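+ # An illustrative quorum configuration for a three-instance cluster using
+ # the two fields above:
+ #
+ #   spec:
+ #     instances: 3
+ #     minSyncReplicas: 1
+ #     maxSyncReplicas: 2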
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this cluster
+ properties:
+ customQueriesConfigMap:
+ description: The list of config maps containing the custom queries
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ customQueriesSecret:
+ description: The list of secrets containing the custom queries
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ disableDefaultQueries:
+ default: false
+ description: |-
+ Whether to disable the default queries.
+ Set it to `true` if you don't want to inject default queries into the cluster.
+ Default: false.
+ type: boolean
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ tls:
+ description: |-
+ Configure TLS communication for the metrics endpoint.
+ Changing tls.enabled option will force a rollout of all instances.
+ properties:
+ enabled:
+ default: false
+ description: |-
+ Enable TLS for the monitoring endpoint.
+ Changing this option will force a rollout of all instances.
+ type: boolean
+ type: object
+ type: object
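+ # A hedged sketch of the `monitoring` stanza; the ConfigMap name and key
+ # are hypothetical:
+ #
+ #   spec:
+ #     monitoring:
+ #       enablePodMonitor: true
+ #       customQueriesConfigMap:
+ #         - name: example-monitoring
+ #           key: custom-queries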
+ nodeMaintenanceWindow:
+ description: Define a maintenance window for the Kubernetes nodes
+ properties:
+ inProgress:
+ default: false
+ description: Is there a node maintenance activity in progress?
+ type: boolean
+ reusePVC:
+ default: true
+                description: |-
+                  Reuse the existing PVC (wait for the node to come
+                  up again) or not (recreate it elsewhere when `instances` > 1)
+ type: boolean
+ type: object
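+          # An illustrative sketch (comment only, not part of the generated
+          # schema): marking a node maintenance window in a Cluster manifest,
+          # asking the operator to recreate PVCs elsewhere:
+          #
+          #   nodeMaintenanceWindow:
+          #     inProgress: true
+          #     reusePVC: false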
+ plugins:
+ description: |-
+ The plugins configuration, containing
+ any plugin to be loaded with the corresponding configuration
+ items:
+              description: |-
+                PluginConfiguration specifies a plugin that needs to be loaded for this
+                cluster to be reconciled
+ properties:
+ enabled:
+ default: true
+ description: Enabled is true if this plugin will be used
+ type: boolean
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ type: array
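+          # An illustrative sketch: loading a plugin with parameters. The
+          # plugin name and parameter key below are hypothetical:
+          #
+          #   plugins:
+          #     - name: example.cnpg.io/my-plugin
+          #       enabled: true
+          #       parameters:
+          #         logLevel: debug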
+ postgresGID:
+ default: 26
+ description: The GID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresUID:
+ default: 26
+ description: The UID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresql:
+ description: Configuration of the PostgreSQL server
+ properties:
+ enableAlterSystem:
+ description: |-
+ If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+ on this CloudNativePG Cluster.
+ This should only be used for debugging and troubleshooting.
+ Defaults to false.
+ type: boolean
+ ldap:
+ description: Options to specify LDAP configuration
+ properties:
+ bindAsAuth:
+ description: Bind as authentication configuration
+ properties:
+ prefix:
+ description: Prefix for the bind authentication option
+ type: string
+ suffix:
+ description: Suffix for the bind authentication option
+ type: string
+ type: object
+ bindSearchAuth:
+ description: Bind+Search authentication configuration
+ properties:
+ baseDN:
+ description: Root DN to begin the user search
+ type: string
+ bindDN:
+ description: DN of the user to bind to the directory
+ type: string
+ bindPassword:
+ description: Secret with the password for the user to
+ bind to the directory
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ searchAttribute:
+ description: Attribute to match against the username
+ type: string
+ searchFilter:
+ description: Search filter to use when doing the search+bind
+ authentication
+ type: string
+ type: object
+ port:
+ description: LDAP server port
+ type: integer
+ scheme:
+                    description: LDAP scheme to be used; possible options are
+                      `ldap` and `ldaps`
+ enum:
+ - ldap
+ - ldaps
+ type: string
+ server:
+ description: LDAP hostname or IP address
+ type: string
+ tls:
+                    description: Set to 'true' to enable LDAP over TLS. 'false'
+                      is the default
+ type: boolean
+ type: object
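+              # An illustrative sketch of bind+search LDAP authentication using
+              # the fields above; the host, DNs, and secret name are hypothetical:
+              #
+              #   ldap:
+              #     server: ldap.example.com
+              #     port: 636
+              #     scheme: ldaps
+              #     bindSearchAuth:
+              #       baseDN: ou=people,dc=example,dc=com
+              #       bindDN: cn=admin,dc=example,dc=com
+              #       bindPassword:
+              #         name: ldap-secret
+              #         key: password
+              #       searchAttribute: uid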
+ parameters:
+ additionalProperties:
+ type: string
+ description: PostgreSQL configuration options (postgresql.conf)
+ type: object
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ pg_ident:
+ description: |-
+ PostgreSQL User Name Maps rules (lines to be appended
+ to the pg_ident.conf file)
+ items:
+ type: string
+ type: array
+ promotionTimeout:
+ description: |-
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+ Default value is 40000000, greater than one year in seconds,
+ big enough to simulate an infinite timeout
+ format: int32
+ type: integer
+ shared_preload_libraries:
+                description: List of shared preload libraries to add to the default
+                  ones
+ items:
+ type: string
+ type: array
+ syncReplicaElectionConstraint:
+ description: |-
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+ set up.
+ properties:
+ enabled:
+ description: This flag enables the constraints for sync replicas
+ type: boolean
+ nodeLabelsAntiAffinity:
+                    description: A list of node label values to extract and compare
+                      to evaluate if the pods reside in the same topology or not
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ synchronous:
+ description: Configuration of the PostgreSQL synchronous replication
+ feature
+ properties:
+ dataDurability:
+ default: required
+ description: |-
+ If set to "required", data durability is strictly enforced. Write operations
+ with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+ block if there are insufficient healthy replicas, ensuring data persistence.
+ If set to "preferred", data durability is maintained when healthy replicas
+ are available, but the required number of instances will adjust dynamically
+ if replicas become unavailable. This setting relaxes strict durability enforcement
+ to allow for operational continuity. This setting is only applicable if both
+ `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+ enum:
+ - required
+ - preferred
+ type: string
+ maxStandbyNamesFromCluster:
+ description: |-
+ Specifies the maximum number of local cluster pods that can be
+ automatically included in the `synchronous_standby_names` option in
+ PostgreSQL.
+ type: integer
+ method:
+ description: |-
+ Method to select synchronous replication standbys from the listed
+ servers, accepting 'any' (quorum-based synchronous replication) or
+ 'first' (priority-based synchronous replication) as values.
+ enum:
+ - any
+ - first
+ type: string
+ number:
+ description: |-
+ Specifies the number of synchronous standby servers that
+ transactions must wait for responses from.
+ type: integer
+ x-kubernetes-validations:
+ - message: The number of synchronous replicas should be greater
+ than zero
+ rule: self > 0
+ standbyNamesPost:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` after local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ standbyNamesPre:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` before local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ required:
+ - method
+ - number
+ type: object
+ x-kubernetes-validations:
+ - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+ and empty 'standbyNamesPost'
+ rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+ || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+ || self.standbyNamesPost.size()==0))
+ type: object
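+          # An illustrative sketch: quorum-based synchronous replication that
+          # waits for one standby, using only the fields documented above:
+          #
+          #   postgresql:
+          #     synchronous:
+          #       method: any
+          #       number: 1
+          #       dataDurability: required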
+ primaryUpdateMethod:
+ default: restart
+ description: |-
+ Method to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+              it can be done with a switchover (`switchover`) or in-place (`restart` - default)
+ enum:
+ - switchover
+ - restart
+ type: string
+ primaryUpdateStrategy:
+ default: unsupervised
+ description: |-
+ Deployment strategy to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be automated (`unsupervised` - default) or manual (`supervised`)
+ enum:
+ - unsupervised
+ - supervised
+ type: string
+ priorityClassName:
+ description: |-
+              Name of the priority class which will be used in every generated Pod.
+              If the specified PriorityClass does not exist, the pod will not be
+              scheduled. Please refer to
+ https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ for more information
+ type: string
+ probes:
+ description: |-
+ The configuration of the probes to be injected
+ in the PostgreSQL Pods.
+ properties:
+ liveness:
+ description: The liveness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                      Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                      Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ readiness:
+ description: The readiness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+                      Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                      Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                      Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ startup:
+ description: The startup probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+                      Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                      Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                      Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ type: object
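+          # An illustrative sketch: tuning the readiness probe for every
+          # instance (the values are arbitrary examples):
+          #
+          #   probes:
+          #     readiness:
+          #       periodSeconds: 5
+          #       failureThreshold: 6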
+ projectedVolumeTemplate:
+            description: |-
+              Template to be used to define projected volumes. Projected volumes
+              will be mounted under the `/projected` base folder
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume root to write
+ the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap data
+ to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+                          description: optional specifies whether the ConfigMap
+                            or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the downwardAPI
+ data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the
+ pod: only annotations, labels, name, namespace
+ and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must not
+ be absolute or contain the ''..'' path. Must
+ be utf-8 encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for
+ volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the secret data to
+ project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+                          description: optional field specifies whether the Secret
+                            or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information about the
+ serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+                            its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
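+          # An illustrative sketch: projecting a key of a ConfigMap (with a
+          # hypothetical name) under the `/projected` base folder:
+          #
+          #   projectedVolumeTemplate:
+          #     sources:
+          #       - configMap:
+          #           name: app-settings
+          #           items:
+          #             - key: app.properties
+          #               path: app.properties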
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+                  existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ minApplyDelay:
+ description: |-
+ When replica mode is enabled, this parameter allows you to replay
+ transactions only when the system time is at least the configured
+ time past the commit time. This provides an opportunity to correct
+ data loss errors. Note that when this parameter is set, a promotion
+ token cannot be used.
+ type: string
+ primary:
+ description: |-
+ Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+ topology specified in externalClusters
+ type: string
+ promotionToken:
+ description: |-
+ A demotion token generated by an external cluster used to
+ check if the promotion requirements are met.
+ type: string
+ self:
+ description: |-
+ Self defines the name of this cluster. It is used to determine if this is a primary
+ or a replica cluster, comparing it with `primary`
+ type: string
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
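+          # An illustrative sketch of a distributed topology, where this
+          # cluster replicates from an external cluster; the cluster names are
+          # hypothetical:
+          #
+          #   replica:
+          #     primary: cluster-eu
+          #     self: cluster-us
+          #     source: cluster-eu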
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+                      This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$
+ type: string
+ type: object
+ synchronizeReplicas:
+ description: Configures the synchronization of the user defined
+ physical replication slots
+ properties:
+ enabled:
+ default: true
+ description: When set to true, every replication slot that
+ is on the primary is synchronized on each standby
+ type: boolean
+ excludePatterns:
+ description: List of regular expression patterns to match
+ the names of replication slots to be excluded (by default
+ empty)
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ updateInterval:
+ default: 30
+                description: |-
+                  Standbys will update the status of the local replication slots
+                  every `updateInterval` seconds (default 30).
+ minimum: 1
+ type: integer
+ type: object
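+          # An illustrative sketch: keep HA slot management enabled while
+          # excluding user-defined slots that match a (hypothetical) pattern:
+          #
+          #   replicationSlots:
+          #     highAvailability:
+          #       enabled: true
+          #     synchronizeReplicas:
+          #       enabled: true
+          #       excludePatterns:
+          #         - "^legacy_"
+          #     updateInterval: 30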
+ resources:
+ description: |-
+ Resources requirements of every generated Pod. Please refer to
+ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ for more information.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified Kubernetes
+ scheduler. If not specified, the pod will be dispatched by the default
+ scheduler. More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/
+ type: string
+ seccompProfile:
+ description: |-
+ The SeccompProfile applied to every Pod and Container.
+ Defaults to: `RuntimeDefault`
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountTemplate:
+ description: Configure the generation of the service account
+ properties:
+ metadata:
+                description: |-
+                  The metadata to be used for the generated
+                  service account
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ required:
+ - metadata
+ type: object
+ smartShutdownTimeout:
+ default: 180
+ description: |-
+ The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete.
+ Make sure you reserve enough time for the operator to request a fast shutdown of Postgres
+ (that is: `stopDelay` - `smartShutdownTimeout`).
+ format: int32
+ type: integer
+ startDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ successfully start up (default 3600).
+ The startup probe failure threshold is derived from this value using the formula:
+ ceiling(startDelay / 10).
+ format: int32
+ type: integer
+ stopDelay:
+ default: 1800
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ gracefully shutdown (default 1800)
+ format: int32
+ type: integer
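+          # An illustrative sketch of the timing relationship described above:
+          # with the defaults below, the window left for the fast shutdown is
+          # stopDelay - smartShutdownTimeout = 1800 - 180 = 1620 seconds:
+          #
+          #   smartShutdownTimeout: 180
+          #   stopDelay: 1800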
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                      set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+                description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
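+          # An illustrative sketch of a minimal storage configuration; the
+          # storage class name is hypothetical:
+          #
+          #   storage:
+          #     size: 10Gi
+          #     storageClass: fast-ssd
+          #     resizeInUseVolumes: true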
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shutdown during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes
+ to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                            set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+                      description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
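+          # An illustrative sketch: a temporary tablespace backed by its own
+          # volume (the name and size are hypothetical):
+          #
+          #   tablespaces:
+          #     - name: scratch
+          #       temporary: true
+          #       storage:
+          #         size: 5Gi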
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+                        And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+                        In this situation, a new pod with the same labelSelector cannot be scheduled,
+                        because the computed skew will be 3 (3 - 0) if the new pod is scheduled to any of the three zones,
+                        which would violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+                        This is a beta-level feature enabled by default by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+                        This is a beta-level feature enabled by default by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+                        We consider each <key, value> as a "bucket", and try to put a balanced
+                        number of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
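+              # A short sketch of a zone-spreading constraint, assuming the
+              # standard topology.kubernetes.io/zone node label; the cluster
+              # name in the selector is hypothetical:
+              #
+              #   topologySpreadConstraints:
+              #     - maxSkew: 1
+              #       topologyKey: topology.kubernetes.io/zone
+              #       whenUnsatisfiable: DoNotSchedule
+              #       labelSelector:
+              #         matchLabels:
+              #           cnpg.io/cluster: cluster-example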
+ walStorage:
+ description: Configuration of the storage for PostgreSQL WAL (Write-Ahead
+ Log)
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                          set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+                    description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
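+              # A minimal sketch of dedicated WAL storage using the fields
+              # above; size and class are illustrative only:
+              #
+              #   walStorage:
+              #     size: 1Gi
+              #     storageClass: standard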
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ azurePVCUpdateEnabled:
+ description: AzurePVCUpdateEnabled shows if the PVC online upgrade
+ is enabled for this cluster
+ type: boolean
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+            description: The commit hash of the operator that is currently running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions
+ type: object
+ type: object
+ currentPrimary:
+ description: Current primary instance
+ type: string
+ currentPrimaryFailingSinceTimestamp:
+ description: |-
+ The timestamp when the primary was detected to be unhealthy
+ This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+ type: string
+ currentPrimaryTimestamp:
+ description: The timestamp when the last actual promotion to primary
+ has occurred
+ type: string
+ danglingPVC:
+ description: |-
+ List of all the PVCs created by this cluster and still available
+ which are not attached to a Pod
+ items:
+ type: string
+ type: array
+ demotionToken:
+ description: |-
+ DemotionToken is a JSON token containing the information
+ from pg_controldata such as Database system identifier, Latest checkpoint's
+ TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+ WAL file, and Time of latest checkpoint
+ type: string
+ firstRecoverabilityPoint:
+ description: |-
+ The first recoverability point, stored as a date in RFC3339 format.
+ This field is calculated from the content of FirstRecoverabilityPointByMethod
+ type: string
+ firstRecoverabilityPointByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: The first recoverability point, stored as a date in RFC3339
+ format, per backup method type
+ type: object
+ healthyPVC:
+ description: List of all the PVCs not dangling nor initializing
+ items:
+ type: string
+ type: array
+ image:
+ description: Image contains the image name used by the pods
+ type: string
+ initializingPVC:
+ description: List of all the PVCs that are being initialized by this
+ cluster
+ items:
+ type: string
+ type: array
+ instanceNames:
+ description: List of instance names in the cluster
+ items:
+ type: string
+ type: array
+ instances:
+ description: The total number of PVC Groups detected in the cluster.
+ It may differ from the number of existing instance pods.
+ type: integer
+ instancesReportedState:
+ additionalProperties:
+ description: InstanceReportedState describes the last reported state
+ of an instance during a reconciliation loop
+ properties:
+ isPrimary:
+ description: indicates if an instance is the primary one
+ type: boolean
+ timeLineID:
+ description: indicates on which TimelineId the instance is
+ type: integer
+ required:
+ - isPrimary
+ type: object
+ description: The reported state of the instances during the last reconciliation
+ loop
+ type: object
+ instancesStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: InstancesStatus indicates in which status the instances
+ are
+ type: object
+ jobCount:
+ description: How many Jobs have been created by this cluster
+ format: int32
+ type: integer
+ lastFailedBackup:
+ description: Stored as a date in RFC3339 format
+ type: string
+ lastPromotionToken:
+ description: |-
+ LastPromotionToken is the last verified promotion token that
+ was used to promote a replica cluster
+ type: string
+ lastSuccessfulBackup:
+ description: |-
+ Last successful backup, stored as a date in RFC3339 format
+ This field is calculated from the content of LastSuccessfulBackupByMethod
+ type: string
+ lastSuccessfulBackupByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: Last successful backup, stored as a date in RFC3339 format,
+ per backup method type
+ type: object
+ latestGeneratedNode:
+ description: ID of the latest generated node (used to avoid node name
+ clashing)
+ type: integer
+ managedRolesStatus:
+ description: ManagedRolesStatus reports the state of the managed roles
+ in the cluster
+ properties:
+ byStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: ByStatus gives the list of roles in each state
+ type: object
+ cannotReconcile:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: |-
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+ with an explanation of the cause
+ type: object
+ passwordStatus:
+ additionalProperties:
+ description: PasswordState represents the state of the password
+ of a managed RoleConfiguration
+ properties:
+ resourceVersion:
+ description: the resource version of the password secret
+ type: string
+ transactionID:
+ description: the last transaction ID to affect the role
+ definition in PostgreSQL
+ format: int64
+ type: integer
+ type: object
+ description: PasswordStatus gives the last transaction id and
+ password secret version for each managed role
+ type: object
+ type: object
+ onlineUpdateEnabled:
+ description: OnlineUpdateEnabled shows if the online upgrade is enabled
+ inside the cluster
+ type: boolean
+ phase:
+ description: Current phase of the cluster
+ type: string
+ phaseReason:
+ description: Reason for the current phase
+ type: string
+ pluginStatus:
+ description: PluginStatus is the status of the loaded plugins
+ items:
+ description: PluginStatus is the status of a loaded plugin
+ properties:
+ backupCapabilities:
+ description: |-
+ BackupCapabilities are the list of capabilities of the
+ plugin regarding the Backup management
+ items:
+ type: string
+ type: array
+ capabilities:
+ description: |-
+ Capabilities are the list of capabilities of the
+ plugin
+ items:
+ type: string
+ type: array
+ name:
+ description: Name is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contain the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+            description: List of all the PVCs that have the ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+              Target primary instance; this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+                a shared-nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+                SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
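+    # Since the scale subresource maps replicas to .spec.instances, a cluster
+    # can be resized with the standard scale verb; the resource name below is
+    # hypothetical:
+    #
+    #   kubectl scale --replicas=3 clusters.postgresql.cnpg.io/cluster-example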
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: databases.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Database
+ listKind: DatabaseList
+ plural: databases
+ singular: database
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Database is the Schema for the databases API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired Database.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allowConnections:
+ description: |-
+ Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and
+ `ALTER DATABASE`. If false then no one can connect to this database.
+ type: boolean
+ builtinLocale:
+ description: |-
+ Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. Specifies the locale name when the
+ builtin provider is used. This option requires `localeProvider` to
+ be set to `builtin`. Available from PostgreSQL 17.
+ type: string
+ x-kubernetes-validations:
+ - message: builtinLocale is immutable
+ rule: self == oldSelf
+ cluster:
+ description: The name of the PostgreSQL cluster hosting the database.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ collationVersion:
+ description: |-
+ Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This
+ setting cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: collationVersion is immutable
+ rule: self == oldSelf
+ connectionLimit:
+ description: |-
+ Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and
+ `ALTER DATABASE`. How many concurrent connections can be made to
+ this database. -1 (the default) means no limit.
+ type: integer
+ databaseReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this database.
+ enum:
+ - delete
+ - retain
+ type: string
+ encoding:
+ description: |-
+ Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Character set encoding to use in the database.
+ type: string
+ x-kubernetes-validations:
+ - message: encoding is immutable
+ rule: self == oldSelf
+ ensure:
+ default: present
+ description: Ensure the PostgreSQL database is `present` or `absent`
+ - defaults to "present".
+ enum:
+ - present
+ - absent
+ type: string
+ icuLocale:
+ description: |-
+ Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. Specifies the ICU locale when the ICU
+ provider is used. This option requires `localeProvider` to be set to
+ `icu`. Available from PostgreSQL 15.
+ type: string
+ x-kubernetes-validations:
+ - message: icuLocale is immutable
+ rule: self == oldSelf
+ icuRules:
+ description: |-
+ Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Specifies additional collation rules to customize
+ the behavior of the default collation. This option requires
+ `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
+ type: string
+ x-kubernetes-validations:
+ - message: icuRules is immutable
+ rule: self == oldSelf
+ isTemplate:
+ description: |-
+ Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER
+ DATABASE`. If true, this database is considered a template and can
+ be cloned by any user with `CREATEDB` privileges.
+ type: boolean
+ locale:
+ description: |-
+ Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Sets the default collation order and character
+ classification in the new database.
+ type: string
+ x-kubernetes-validations:
+ - message: locale is immutable
+ rule: self == oldSelf
+ localeCType:
+ description: |-
+ Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: localeCType is immutable
+ rule: self == oldSelf
+ localeCollate:
+ description: |-
+ Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: localeCollate is immutable
+ rule: self == oldSelf
+ localeProvider:
+ description: |-
+ Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. This option sets the locale provider for
+ databases created in the new cluster. Available from PostgreSQL 16.
+ type: string
+ x-kubernetes-validations:
+ - message: localeProvider is immutable
+ rule: self == oldSelf
+ name:
+ description: The name of the database to create inside PostgreSQL.
+ This setting cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ - message: the name postgres is reserved
+ rule: self != 'postgres'
+ - message: the name template0 is reserved
+ rule: self != 'template0'
+ - message: the name template1 is reserved
+ rule: self != 'template1'
+ owner:
+ description: |-
+ Maps to the `OWNER` parameter of `CREATE DATABASE`.
+ Maps to the `OWNER TO` command of `ALTER DATABASE`.
+ The role name of the user who owns the database inside PostgreSQL.
+ type: string
+ tablespace:
+ description: |-
+ Maps to the `TABLESPACE` parameter of `CREATE DATABASE`.
+ Maps to the `SET TABLESPACE` command of `ALTER DATABASE`.
+ The name of the tablespace (in PostgreSQL) that will be associated
+ with the new database. This tablespace will be the default
+ tablespace used for objects created in this database.
+ type: string
+ template:
+ description: |-
+ Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. The name of the template from which to create
+ this database.
+ type: string
+ x-kubernetes-validations:
+ - message: template is immutable
+ rule: self == oldSelf
+ required:
+ - cluster
+ - name
+ - owner
+ type: object
+ x-kubernetes-validations:
+ - message: builtinLocale is only available when localeProvider is set
+ to `builtin`
+ rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin'''
+ - message: icuLocale is only available when localeProvider is set to `icu`
+ rule: '!has(self.icuLocale) || self.localeProvider == ''icu'''
+ - message: icuRules is only available when localeProvider is set to `icu`
+ rule: '!has(self.icuRules) || self.localeProvider == ''icu'''
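+          # A minimal sketch of a Database manifest exercising the new
+          # `template` field added by this patch; all names are hypothetical:
+          #
+          #   apiVersion: postgresql.cnpg.io/v1
+          #   kind: Database
+          #   metadata:
+          #     name: db-one
+          #   spec:
+          #     cluster:
+          #       name: cluster-example
+          #     name: one
+          #     owner: app
+          #     template: template1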
+ status:
+ description: |-
+ Most recently observed status of the Database. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
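+    # An illustrative ImageCatalog satisfying the schema above; the image
+    # references point at the project registry but the tags are placeholders:
+    #
+    #   apiVersion: postgresql.cnpg.io/v1
+    #   kind: ImageCatalog
+    #   metadata:
+    #     name: postgresql
+    #   spec:
+    #     images:
+    #       - major: 16
+    #         image: ghcr.io/cloudnative-pg/postgresql:16
+    #       - major: 17
+    #         image: ghcr.io/cloudnative-pg/postgresql:17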
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: poolers.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Pooler
+ listKind: PoolerList
+ plural: poolers
+ singular: pooler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.type
+ name: Type
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Pooler is the Schema for the poolers API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the Pooler.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: |-
+ This is the cluster reference on which the Pooler will work.
+                The Pooler name should never match any cluster name within the same namespace.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ deploymentStrategy:
+ description: The deployment strategy to use for pgbouncer to replace
+ existing pods with new ones
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if DeploymentStrategyType =
+ RollingUpdate.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be scheduled above the desired number of
+ pods.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up.
+ Defaults to 25%.
+ Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ the rolling update starts, such that the total number of old and new pods do not exceed
+ 130% of desired pods. Once old pods have been killed,
+ new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ at any time during the update is at most 130% of desired pods.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ Absolute number is calculated from percentage by rounding down.
+ This can not be 0 if MaxSurge is 0.
+ Defaults to 25%.
+ Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ that the total number of pods available at all times during the update is at
+ least 70% of desired pods.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description: Type of deployment. Can be "Recreate" or "RollingUpdate".
+ Default is RollingUpdate.
+ type: string
+ type: object
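+              # A sketch of a conservative rollout for the pgbouncer
+              # deployment; the values are illustrative, not defaults:
+              #
+              #   deploymentStrategy:
+              #     type: RollingUpdate
+              #     rollingUpdate:
+              #       maxSurge: 1
+              #       maxUnavailable: 0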
+ instances:
+ default: 1
+ description: 'The number of replicas we want. Default: 1.'
+ format: int32
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this pooler.
+ properties:
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
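+              # A sketch enabling the PodMonitor with a single relabeling
+              # rule; the dropped label name is an arbitrary example:
+              #
+              #   monitoring:
+              #     enablePodMonitor: true
+              #     podMonitorMetricRelabelings:
+              #       - action: labeldrop
+              #         regex: pod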
+ pgbouncer:
+ description: The PgBouncer configuration
+ properties:
+ authQuery:
+ description: |-
+ The query that will be used to download the hash of the password
+ of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+                    If specified, an AuthQuerySecret must also be specified, and
+                    no automatic CNPG Cluster integration will be triggered.
+ type: string
+ authQuerySecret:
+ description: |-
+                    The credentials of the user to be used for the authentication
+                    query. If specified, an AuthQuery
+                    (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+                    must also be specified, and no automatic CNPG Cluster integration will be triggered.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Additional parameters to be passed to PgBouncer - please check
+ the CNPG documentation for a list of options you can configure
+ type: object
+ paused:
+ default: false
+ description: |-
+ When set to `true`, PgBouncer will disconnect from the PostgreSQL
+ server, first waiting for all queries to complete, and pause all new
+ client connections until this value is set to `false` (default). Internally,
+ the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
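+              # A sketch of a transaction-mode PgBouncer configuration;
+              # parameter values are illustrative:
+              #
+              #   pgbouncer:
+              #     poolMode: transaction
+              #     parameters:
+              #       max_client_conn: "1000"
+              #       default_pool_size: "10"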
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+                          optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+                      case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+                      configuration and the ipFamilyPolicy field. If this field is specified
+                      manually, and the requested family is both available in the cluster
+                      and allowed by ipFamilyPolicy, it will be used; otherwise creation of
+                      the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+                      If specified and supported by the platform, traffic through the
+                      cloud-provider load-balancer will be restricted to the specified
+                      client IPs. This field will be ignored if the cloud-provider does
+                      not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
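+                  # Illustrative sketch, not part of the schema: a single entry of the
+                  # `ports` list above; the port name and numbers are hypothetical.
+                  #
+                  #   ports:
+                  #     - name: pgbouncer
+                  #       port: 5432
+                  #       targetPort: 5432
+                  #       protocol: TCP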
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains the configurations
+ of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations of Client
+ IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+                              The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP".
+                              Default value is 10800 (for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic is
+ distributed to Service endpoints. Implementations can use this field as a
+ hint, but are not required to guarantee strict adherence. If the field is
+ not set, the implementation will apply its default routing strategy. If set
+ to "PreferClose", implementations should prioritize endpoints that are
+ topologically close (e.g., same zone).
+ This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
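+          # Illustrative sketch, not part of the schema: using `serviceTemplate` to
+          # expose the pooler through a LoadBalancer Service; the annotation shown is
+          # a hypothetical cloud-provider hint.
+          #
+          #   serviceTemplate:
+          #     metadata:
+          #       annotations:
+          #         service.beta.kubernetes.io/aws-load-balancer-type: external
+          #     spec:
+          #       type: LoadBalancer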
+ template:
+ description: The template of the Pod to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                                the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                                the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
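+                  # Illustrative sketch, not part of the schema: an `affinity` stanza
+                  # spreading pooler pods across nodes; the label key and value are
+                  # hypothetical.
+                  #
+                  #   affinity:
+                  #     podAntiAffinity:
+                  #       requiredDuringSchedulingIgnoredDuringExecution:
+                  #         - topologyKey: kubernetes.io/hostname
+                  #           labelSelector:
+                  #             matchLabels:
+                  #               cnpg.io/poolerName: pooler-example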
+ automountServiceAccountToken:
+ description: AutomountServiceAccountToken indicates whether
+ a service account token should be automatically mounted.
+ type: boolean
+ containers:
+ description: |-
+ List of containers belonging to the pod.
+ Containers cannot currently be added or removed.
+ There must be at least one container in a Pod.
+ Cannot be updated.
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+                                      Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
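+ # Illustration (not part of the schema): a minimal env list combining a literal
+ # value with a secretKeyRef source, as described above; names are hypothetical.
+ #   env:
+ #     - name: APP_MODE
+ #       value: "production"
+ #     - name: DB_PASSWORD
+ #       valueFrom:
+ #         secretKeyRef:
+ #           name: app-credentials
+ #           key: password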
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ Each key defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
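+ # Illustration (not part of the schema): envFrom pulling every key from a
+ # ConfigMap with a prefix; per the precedence rules above, an explicit env
+ # entry with the same final name would win. Names are hypothetical.
+ #   envFrom:
+ #     - prefix: CFG_
+ #       configMapRef:
+ #         name: app-settings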
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container; the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container; the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
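+ # Illustration (not part of the schema): a sketch of lifecycle hooks using
+ # exec handlers as described above; the commands are hypothetical.
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "echo started >> /tmp/lifecycle.log"]
+ #     preStop:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "sleep 5"]  # give connections time to drain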
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container; the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds between the time the processes running in the pod
+ are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
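+ # Illustration (not part of the schema): a liveness probe combining the fields
+ # above; the /healthz path and port 8080 are hypothetical.
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 10
+ #     periodSeconds: 10
+ #     failureThreshold: 3   # restart after 3 consecutive failures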
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information, see https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
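+ # Illustration (not part of the schema): declaring a named port; note that
+ # (containerPort, protocol) is the merge key for this list, per the
+ # x-kubernetes-list-map-keys above. The port number is hypothetical.
+ #   ports:
+ #     - name: metrics
+ #       containerPort: 9187
+ #       protocol: TCP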
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container; the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds between the time the processes running in the pod
+ are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resource resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
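+ # Illustration (not part of the schema): requests and limits as described
+ # above; the quantities are hypothetical and requests cannot exceed limits.
+ #   resources:
+ #     requests:
+ #       cpu: "250m"
+ #       memory: 256Mi
+ #     limits:
+ #       cpu: "1"
+ #       memory: 512Mi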
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
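+ # Illustration (not part of the schema): the "sidecar" pattern described
+ # above, using restartPolicy: Always on an init container so it keeps running
+ # alongside the regular containers. Name and image are hypothetical.
+ #   initContainers:
+ #     - name: log-shipper
+ #       image: example.com/log-shipper:1.0
+ #       restartPolicy: Always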
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is always true when the container:
+ 1) is run as Privileged, or
+ 2) has CAP_SYS_ADMIN.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default, which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
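+ # Illustration (not part of the schema): a restrictive container security
+ # context built from the fields above.
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     capabilities:
+ #       drop: ["ALL"]
+ #     seccompProfile:
+ #       type: RuntimeDefault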
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container; the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds between the time the processes running in the pod
+ are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
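+ # Illustration (not part of the schema): a startup probe that tolerates a slow
+ # start; with failureThreshold: 30 and periodSeconds: 10, the container has up
+ # to 30 * 10 = 300 seconds to come up before it is restarted. The /healthz
+ # path and port are hypothetical.
+ #   startupProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     failureThreshold: 30
+ #     periodSeconds: 10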
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
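+ # Illustration (not part of the schema): mounting a single file from a volume
+ # read-only via subPath, as described above; the names are hypothetical.
+ #   volumeMounts:
+ #     - name: config
+ #       mountPath: /etc/app/app.conf
+ #       subPath: app.conf
+ #       readOnly: true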
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ dnsConfig:
+ description: |-
+ Specifies the DNS parameters of a pod.
+ Parameters specified here will be merged into the generated DNS
+ configuration based on DNSPolicy.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver
+ options of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
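+ # Illustration (not part of the schema): dnsConfig entries that are merged
+ # into the policy-generated resolver configuration; the values are hypothetical.
+ #   dnsConfig:
+ #     nameservers: ["10.96.0.10"]
+ #     searches: ["example.svc.cluster.local"]
+ #     options:
+ #       - name: ndots
+ #         value: "2"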
+ dnsPolicy:
+ description: |-
+ Set DNS policy for the pod.
+ Defaults to "ClusterFirst".
+ Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ To have DNS options set along with hostNetwork, you have to specify DNS policy
+ explicitly to 'ClusterFirstWithHostNet'.
+ type: string
+ enableServiceLinks:
+ description: |-
+ EnableServiceLinks indicates whether information about services should be injected into pod's
+ environment variables, matching the syntax of Docker links.
+ Optional: Defaults to true.
+ type: boolean
+ ephemeralContainers:
+ description: |-
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ items:
+ description: |-
+ An EphemeralContainer is a temporary container that you may add to an existing Pod for
+ user-initiated activities such as debugging. Ephemeral containers have no resource or
+ scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+ removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+ Pod to exceed its resource allocation.
+
+ To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+ Pod. Ephemeral containers may not be removed or restarted.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ Each key defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral
+ containers.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container; the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the ephemeral container specified as a DNS_LABEL.
+ This name must be unique among all containers, init containers and ephemeral containers.
+ type: string
+ ports:
+ description: Ports are not allowed for ephemeral containers.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
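+ # Illustrative sketch: probes are rejected on ephemeral containers, but the
+ # same Probe schema applies to regular containers, where a minimal HTTP
+ # readiness probe might look like this (assuming the container serves
+ # /healthz on port 8080):
+ #
+ #   readinessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 5
+ #     periodSeconds: 10
+ #     failureThreshold: 3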
+ resizePolicy:
+ description: Resource resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
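+ # Illustrative sketch: `resizePolicy` decides whether an in-place resource
+ # resize restarts the container. For example, to allow CPU changes without a
+ # restart while restarting on memory changes:
+ #
+ #   resizePolicy:
+ #     - resourceName: cpu
+ #       restartPolicy: NotRequired
+ #     - resourceName: memory
+ #       restartPolicy: RestartContainer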
+ resources:
+ description: |-
+ Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ already allocated to the pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
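+ # Illustrative sketch: ephemeral containers cannot declare resources, but on
+ # regular containers the quantity syntax matched by the pattern above reads:
+ #
+ #   resources:
+ #     requests:
+ #       cpu: 250m
+ #       memory: 64Mi
+ #     limits:
+ #       cpu: "1"
+ #       memory: 128Mi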
+ restartPolicy:
+ description: |-
+ Restart policy for the container to manage the restart behavior of each
+ container within a pod.
+ This may only be set for init containers. You cannot set this field on
+ ephemeral containers.
+ type: string
+ securityContext:
+ description: |-
+ Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is always true when the container:
+ 1) is run as Privileged, or
+ 2) has CAP_SYS_ADMIN.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default, which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
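+ # Illustrative sketch: a restrictive securityContext built only from fields
+ # of the schema above:
+ #
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     runAsUser: 10001
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     capabilities:
+ #       drop: ["ALL"]
+ #     seccompProfile:
+ #       type: RuntimeDefault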
+ startupProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
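+ # Illustrative sketch: mounting an existing Pod volume (hypothetical name
+ # "data"); note that subPath mounts are rejected for ephemeral containers:
+ #
+ #   volumeMounts:
+ #     - name: data
+ #       mountPath: /var/lib/app
+ #       readOnly: true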
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
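+ # Usage note: ephemeral containers are normally injected for live debugging
+ # rather than written by hand, e.g. (hypothetical pod and container names):
+ #
+ #   kubectl debug -it my-pod --image=busybox --target=postgres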
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ required:
+ - ip
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - ip
+ x-kubernetes-list-type: map
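+ # Illustrative sketch: each hostAliases entry becomes a line in the pod's
+ # /etc/hosts file (hypothetical address and hostname):
+ #
+ #   hostAliases:
+ #     - ip: "10.0.0.10"
+ #       hostnames:
+ #         - backup.internal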
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Defaults to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Defaults to true.
+ If set to true or not present, the pod will be run in the host user namespace, useful
+ for when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new userns is created for the pod. Setting false is useful for
+ mitigating container breakout vulnerabilities, while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
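+ # Illustrative sketch: imagePullSecrets points at docker-registry Secrets in
+ # the same namespace (hypothetical name):
+ #
+ #   imagePullSecrets:
+ #     - name: private-registry-credentials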
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
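+ # Illustrative sketch: command/args bypass the shell; $(VAR_NAME) is expanded
+ # from the container environment while $$(VAR_NAME) passes through as the
+ # literal $(VAR_NAME). Assuming a hypothetical PGDATA env var:
+ #
+ #   command: ["echo"]
+ #   args: ["expanded: $(PGDATA)", "literal: $$(PGDATA)"]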
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
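+ # Illustrative sketch: env entries either inline a value or pull one from the
+ # sources described above (hypothetical secret and key names):
+ #
+ #   env:
+ #     - name: LOG_LEVEL
+ #       value: info
+ #     - name: POD_NAMESPACE
+ #       valueFrom:
+ #         fieldRef:
+ #           fieldPath: metadata.namespace
+ #     - name: PGPASSWORD
+ #       valueFrom:
+ #         secretKeyRef:
+ #           name: app-credentials
+ #           key: password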
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
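+ # Illustrative sketch: plain init containers may not use lifecycle hooks, but
+ # on regular (or sidecar-style init) containers they look like:
+ #
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["sh", "-c", "echo started"]
+ #     preStop:
+ #       httpGet:
+ #         path: /shutdown
+ #         port: 8080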
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
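+                            # An illustrative liveness probe, shown as a comment; the endpoint
+                            # (/healthz) and port (8080) are assumptions, not part of this schema:
+                            #
+                            #   livenessProbe:
+                            #     httpGet:
+                            #       path: /healthz
+                            #       port: 8080
+                            #     initialDelaySeconds: 10
+                            #     periodSeconds: 10
+                            #     failureThreshold: 3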
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+                            For more information, see https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
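+                            # An illustrative ports entry, shown as a comment; the name and number
+                            # are assumptions. Note that omitting a port here does not prevent it
+                            # from being exposed, as described above:
+                            #
+                            #   ports:
+                            #     - name: metrics
+                            #       containerPort: 9187
+                            #       protocol: TCP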
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+                                Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
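+                            # An illustrative resources stanza, shown as a comment; the quantities
+                            # are assumptions. If requests were omitted, they would default to the
+                            # limits, as described above:
+                            #
+                            #   resources:
+                            #     requests:
+                            #       cpu: 100m
+                            #       memory: 128Mi
+                            #     limits:
+                            #       memory: 256Mi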
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+                                appArmorProfile is the AppArmor options used by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+                                      description: Capability represents POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+                                      description: Capability represents POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+                                The default value is Default, which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+                                The seccomp options used by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
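+                            # An illustrative container securityContext, shown as a comment; all
+                            # values are assumptions chosen to satisfy the constraints described
+                            # above (e.g. none of them is valid when spec.os.name is windows):
+                            #
+                            #   securityContext:
+                            #     runAsNonRoot: true
+                            #     allowPrivilegeEscalation: false
+                            #     readOnlyRootFilesystem: true
+                            #     capabilities:
+                            #       drop: ["ALL"]
+                            #     seccompProfile:
+                            #       type: RuntimeDefault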
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+                                Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
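+                            # An illustrative startup probe for a slow-starting process, shown as
+                            # a comment; the port and thresholds are assumptions. With
+                            # failureThreshold 30 and periodSeconds 10, the container gets up to
+                            # 300 seconds to start before liveness semantics take over:
+                            #
+                            #   startupProbe:
+                            #     tcpSocket:
+                            #       port: 5432
+                            #     failureThreshold: 30
+                            #     periodSeconds: 10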
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+                            flag is false, a container process that reads from stdin will never receive an EOF.
+                            Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ nodeName:
+ description: |-
+ NodeName indicates in which node this pod is scheduled.
+ If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+ Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+ This field should not be used to express a desire for the pod to be scheduled on a specific node.
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a selector which must be true for the pod to fit on a node.
+ Selector which must match a node's labels for the pod to be scheduled on that node.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
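+                          # An illustrative nodeSelector, shown as a comment; the label is an
+                          # assumption:
+                          #
+                          #   nodeSelector:
+                          #     kubernetes.io/os: linux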
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+                            - securityContext.windowsOptions
+
+                            If the OS field is set to windows, the following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+                              Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority. "system-node-critical" and
+ "system-cluster-critical" are two special keywords which indicate the
+ highest priorities with the former being the highest priority. Any other
+ name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no
+ default.
+ type: string
+ readinessGates:
+ description: |-
+ If specified, all readiness gates will be evaluated for pod readiness.
+ A pod is ready when all its containers are ready AND
+ all conditions specified in the readiness gates have status equal to "True"
+ More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ items:
+ description: PodReadinessGate contains the reference to
+ a pod condition
+ properties:
+ conditionType:
+ description: ConditionType refers to a condition in
+ the pod's condition list with matching type.
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resourceClaims:
+ description: |-
+ ResourceClaims defines which ResourceClaims must be allocated
+ and reserved before the Pod is allowed to start. The resources
+ will be made available to those containers which consume them
+ by name.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable.
+ items:
+ description: |-
+ PodResourceClaim references exactly one ResourceClaim, either directly
+ or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+ for the pod.
+
+ It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+ Containers that need access to the ResourceClaim reference it with this name.
+ properties:
+ name:
+ description: |-
+ Name uniquely identifies this resource claim inside the pod.
+ This must be a DNS_LABEL.
+ type: string
+ resourceClaimName:
+ description: |-
+ ResourceClaimName is the name of a ResourceClaim object in the same
+ namespace as this pod.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ resourceClaimTemplateName:
+ description: |-
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ object in the same namespace as this pod.
+
+ The template will be used to create a new ResourceClaim, which will
+ be bound to this pod. When this pod is deleted, the ResourceClaim
+ will also be deleted. The pod name and resource name, along with a
+ generated component, will be used to form a unique name for the
+ ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+
+ This field is immutable and no changes will be made to the
+ corresponding ResourceClaim by the control plane after creating the
+ ResourceClaim.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ restartPolicy:
+ description: |-
+ Restart policy for all containers within the pod.
+ One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+                            Defaults to Always.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ type: string
+ runtimeClassName:
+ description: |-
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ empty definition that uses the default runtime handler.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ type: string
+ schedulerName:
+ description: |-
+                            If specified, the pod will be dispatched by the specified scheduler.
+                            If not specified, the pod will be dispatched by the default scheduler.
+ type: string
+ schedulingGates:
+ description: |-
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ scheduler will not attempt to schedule the pod.
+
+ SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+ items:
+ description: PodSchedulingGate is associated to a Pod to
+ guard its scheduling.
+ properties:
+ name:
+ description: |-
+ Name of the scheduling gate.
+ Each scheduling gate must have a unique name field.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ securityContext:
+ description: |-
+ SecurityContext holds pod-level security attributes and common container settings.
+ Optional: Defaults to empty. See type description for default values of each field.
+ properties:
+ appArmorProfile:
+ description: |-
+                              appArmorProfile is the AppArmor options used by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod.
+ Some volume types allow the Kubelet to change the ownership of that volume
+ to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup
+ 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: |-
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ before being exposed inside Pod. This field will only apply to
+ volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+                              The seccomp options used by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: |-
+ A list of groups applied to the first process run in each container, in
+ addition to the container's primary GID and fsGroup (if specified). If
+ the SupplementalGroupsPolicy feature is enabled, the
+ supplementalGroupsPolicy field determines whether these are in addition
+ to or instead of any group memberships defined in the container image.
+ If unspecified, no additional groups are added, though group memberships
+ defined in the container image may still be used, depending on the
+ supplementalGroupsPolicy field.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ x-kubernetes-list-type: atomic
+ supplementalGroupsPolicy:
+ description: |-
+ Defines how supplemental groups of the first container processes are calculated.
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+ (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+ and the container runtime must implement support for this feature.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ sysctls:
+ description: |-
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be
+ set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options within a container's SecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
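+                          # An illustrative pod-level securityContext, shown as a comment; the
+                          # uid/gid values are assumptions. Per-container securityContext fields
+                          # take precedence over these, as noted above:
+                          #
+                          #   securityContext:
+                          #     runAsNonRoot: true
+                          #     runAsUser: 26
+                          #     fsGroup: 26
+                          #     fsGroupChangePolicy: OnRootMismatch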
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+                            Defaults to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+                            Optional: Defaults to false.
+ type: boolean
+ subdomain:
+ description: |-
+                            If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+                            the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
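+                          # An illustrative toleration, shown as a comment; the key and value are
+                          # assumptions. With operator Equal it matches a taint
+                          # dedicated=postgres:NoSchedule:
+                          #
+                          #   tolerations:
+                          #     - key: dedicated
+                          #       operator: Equal
+                          #       value: postgres
+                          #       effect: NoSchedule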
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put a balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
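+ # A sketch of a constraint matching this schema (labels hypothetical): with
+ # maxSkew=1 and whenUnsatisfiable=DoNotSchedule, matching pods are kept within
+ # one pod of each other across zones:
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: "topology.kubernetes.io/zone"
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           app: "my-app"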
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: azureDisk represents an Azure Data Disk
+ mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode:
+ None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk
+ in the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in
+ the blob storage
+ type: string
+ fsType:
+ default: ext4
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared: multiple
+ blob disks per storage account Dedicated: single
+ blob disk per storage account Managed: azure
+ managed data disk (only in managed availability
+ set). defaults to shared'
+ type: string
+ readOnly:
+ default: false
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: azureFile represents an Azure File Service
+ mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: cephFS represents a Ceph FS mount on the
+ host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: 'path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default
+ is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
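+ # Illustrative configMap volume (names hypothetical): projects only the
+ # "config.yaml" key and tolerates a missing ConfigMap:
+ #   volumes:
+ #     - name: app-config
+ #       configMap:
+ #         name: my-config
+ #         optional: true
+ #         items:
+ #           - key: config.yaml
+ #             path: config.yaml
+ #             mode: 0440   # octal (decimal 288); JSON requires the decimal form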
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
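+ # Illustrative inline CSI volume (driver name and attributes are hypothetical;
+ # consult the drivers actually registered in the cluster):
+ #   volumes:
+ #     - name: secrets-store
+ #       csi:
+ #         driver: secrets-store.csi.k8s.io
+ #         readOnly: true
+ #         volumeAttributes:
+ #           secretProviderClass: "my-provider"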
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..'' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
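+ # Illustrative downwardAPI volume (names hypothetical) exposing the pod's
+ # labels and a container's memory limit as files:
+ #   volumes:
+ #     - name: podinfo
+ #       downwardAPI:
+ #         items:
+ #           - path: "labels"
+ #             fieldRef:
+ #               fieldPath: metadata.labels
+ #           - path: "mem_limit"
+ #             resourceFieldRef:
+ #               containerName: app
+ #               resource: limits.memory
+ #               divisor: 1Mi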
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
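+ # Illustrative emptyDir backed by memory with a size cap (values hypothetical):
+ #   volumes:
+ #     - name: scratch
+ #       emptyDir:
+ #         medium: Memory
+ #         sizeLimit: 256Mi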
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
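+ # Illustrative generic ephemeral volume (storage class name hypothetical);
+ # the PVC is created together with the pod and deleted with it:
+ #   volumes:
+ #     - name: scratch-data
+ #       ephemeral:
+ #         volumeClaimTemplate:
+ #           spec:
+ #             accessModes: ["ReadWriteOnce"]
+ #             storageClassName: "standard"
+ #             resources:
+ #               requests:
+ #                 storage: 1Gi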
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume attached
+ to a kubelet's host machine. This depends on the Flocker
+ control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is the name of the dataset, stored as metadata -> name on the dataset for Flocker;
+ it should be considered deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
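+ # Illustrative hostPath volume (path hypothetical; requires node access and is
+ # generally reserved for privileged system agents):
+ #   volumes:
+ #     - name: host-logs
+ #       hostPath:
+ #         path: /var/log
+ #         type: Directory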
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
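+ # Illustrative image volume (reference hypothetical): mounts an OCI artifact
+ # read-only into the pod:
+ #   volumes:
+ #     - name: model-weights
+ #       image:
+ #         reference: registry.example.com/models/llm:v1
+ #         pullPolicy: IfNotPresent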
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
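+ # Illustrative NFS volume (server and export path hypothetical):
+ #   volumes:
+ #     - name: shared-data
+ #       nfs:
+ #         server: nfs.example.com
+ #         path: /exports/data
+ #         readOnly: true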
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
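+ # Illustrative PVC-backed volume (claim name hypothetical); the claim must
+ # already exist in the pod's namespace:
+ #   volumes:
+ #     - name: data
+ #       persistentVolumeClaim:
+ #         claimName: my-claim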
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType represents the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the
+ configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether
+ the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and uid
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the
+ schema the FieldPath is written
+ in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field
+ to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the
+ relative path name of the file
+ to be created. Must not be absolute
+ or contain the ''..'' path. Must
+ be utf-8 encoded. The first item
+ of the relative path must not
+ start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name:
+ required for volumes, optional
+ for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the
+ secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specifies whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ default: admin
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ default: xfs
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the
+ ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the
+ ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description: sslEnabled flag enables/disables SSL
+ communication with the Gateway, default false
+ type: boolean
+ storageMode:
+ default: ThinProvisioned
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description: optional field specifies whether the
+ Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - containers
+ type: object
+ type: object
+ type:
+ default: rw
+ description: 'Type of service to forward traffic to. Default: `rw`.'
+ enum:
+ - rw
+ - ro
+ type: string
+ required:
+ - cluster
+ - pgbouncer
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the Pooler. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ instances:
+ description: The number of pods trying to be scheduled
+ format: int32
+ type: integer
+ secrets:
+ description: The resource version of the config object
+ properties:
+ clientCA:
+ description: The client CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ pgBouncerSecrets:
+ description: The version of the secrets used by PgBouncer
+ properties:
+ authQuery:
+ description: The auth query secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ serverCA:
+ description: The server CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ serverTLS:
+ description: The server TLS secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
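As a reference for the schema above, a minimal Pooler manifest might look like the
following sketch (object and cluster names, pool mode, and instance count are
illustrative assumptions, not taken from this patch):

    apiVersion: postgresql.cnpg.io/v1
    kind: Pooler
    metadata:
      name: pooler-example-rw        # hypothetical name
    spec:
      cluster:
        name: cluster-example        # an existing Cluster, assumed
      instances: 1                   # drives the scale subresource above
      type: rw                       # default; routes traffic to the primary service
      pgbouncer:
        poolMode: session

Note that type accepts only rw or ro, matching the enum in the schema, and that
cluster and pgbouncer are the two required fields.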
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: publications.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Publication
+ listKind: PublicationList
+ plural: publications
+ singular: publication
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Publication is the Schema for the publications API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PublicationSpec defines the desired state of Publication
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "publisher"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the publication will be installed in
+ the "publisher" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ name:
+ description: The name of the publication inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Publication parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE PUBLICATION` command
+ type: object
+ publicationReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this publication
+ enum:
+ - delete
+ - retain
+ type: string
+ target:
+ description: Target of the publication as expected by PostgreSQL `CREATE
+ PUBLICATION` command
+ properties:
+ allTables:
+ description: |-
+ Marks the publication as one that replicates changes for all tables
+ in the database, including tables created in the future.
+ Corresponding to `FOR ALL TABLES` in PostgreSQL.
+ type: boolean
+ x-kubernetes-validations:
+ - message: allTables is immutable
+ rule: self == oldSelf
+ objects:
+ description: Publish only the following schema objects
+ items:
+ description: PublicationTargetObject is an object to publish
+ properties:
+ table:
+ description: |-
+ Specifies a list of tables to add to the publication. Corresponding
+ to `FOR TABLE` in PostgreSQL.
+ properties:
+ columns:
+ description: The columns to publish
+ items:
+ type: string
+ type: array
+ name:
+ description: The table name
+ type: string
+ only:
+ description: Whether to limit to the table only or include
+ all its descendants
+ type: boolean
+ schema:
+ description: The schema name
+ type: string
+ required:
+ - name
+ type: object
+ tablesInSchema:
+ description: |-
+ Marks the publication as one that replicates changes for all tables
+ in the specified list of schemas, including tables created in the
+ future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL.
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: tablesInSchema and table are mutually exclusive
+ rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema)
+ && has(self.table))
+ maxItems: 100000
+ type: array
+ x-kubernetes-validations:
+ - message: specifying a column list when the publication also
+ publishes tablesInSchema is not supported
+ rule: '!(self.exists(o, has(o.table) && has(o.table.columns))
+ && self.exists(o, has(o.tablesInSchema)))'
+ type: object
+ x-kubernetes-validations:
+ - message: allTables and objects are mutually exclusive
+ rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables)
+ && has(self.objects))
+ required:
+ - cluster
+ - dbname
+ - name
+ - target
+ type: object
+ status:
+ description: PublicationStatus defines the observed state of Publication
+ properties:
+ applied:
+ description: Applied is true if the publication was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
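To make the schema concrete, a Publication replicating every table of a database
could be declared as in this sketch (all names are hypothetical):

    apiVersion: postgresql.cnpg.io/v1
    kind: Publication
    metadata:
      name: publication-example
    spec:
      cluster:
        name: cluster-source         # the "publisher" cluster
      dbname: app                    # immutable once set
      name: app_pub                  # name of the publication inside PostgreSQL
      publicationReclaimPolicy: retain
      target:
        allTables: true              # FOR ALL TABLES; mutually exclusive with objects

Per the CEL validations above, exactly one of target.allTables and target.objects
must be set.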
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: scheduledbackups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ScheduledBackup
+ listKind: ScheduledBackupList
+ plural: scheduledbackups
+ singular: scheduledbackup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .status.lastScheduleTime
+ name: Last Backup
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ScheduledBackup is the Schema for the scheduledbackups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ScheduledBackup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ backupOwnerReference:
+ default: none
+ description: |-
+ Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately after
+ creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: If this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: The last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: Next time we will run a backup
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
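Since the schedule field uses the robfig/cron format with a leading seconds
specifier (six fields rather than the five of Kubernetes CronJobs), a nightly
backup at 02:00 could be declared as in this illustrative sketch (names assumed):

    apiVersion: postgresql.cnpg.io/v1
    kind: ScheduledBackup
    metadata:
      name: backup-nightly
    spec:
      cluster:
        name: cluster-example
      schedule: "0 0 2 * * *"        # seconds minutes hours day-of-month month day-of-week
      backupOwnerReference: self     # created Backup objects are owned by this resource
      method: barmanObjectStore      # the default method
      immediate: true                # start the first backup right after creation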
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the publication will be installed in
+ the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE SUBSCRIPTION` command
+ type: object
+ publicationDBName:
+ description: |-
+ The name of the database containing the publication on the external
+ cluster. Defaults to the one in the external cluster definition.
+ type: string
+ publicationName:
+ description: |-
+ The name of the publication inside the PostgreSQL database in the
+ "publisher"
+ type: string
+ subscriptionReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this subscription
+ enum:
+ - delete
+ - retain
+ type: string
+ required:
+ - cluster
+ - dbname
+ - externalClusterName
+ - name
+ - publicationName
+ type: object
+ status:
+ description: SubscriptionStatus defines the observed state of Subscription
+ properties:
+ applied:
+ description: Applied is true if the subscription was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
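Paired with the Publication sketch earlier, a Subscription on the receiving side
could look like this (names hypothetical; cluster-source is assumed to exist as an
externalClusters entry on the subscriber):

    apiVersion: postgresql.cnpg.io/v1
    kind: Subscription
    metadata:
      name: subscription-example
    spec:
      cluster:
        name: cluster-dest           # the "subscriber" cluster
      dbname: app                    # database on the subscriber; immutable
      name: app_sub                  # name of the subscription inside PostgreSQL
      externalClusterName: cluster-source  # external cluster acting as "publisher"
      publicationName: app_pub       # the publication to subscribe to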
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ - pods
+ - pods/exec
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - podmonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups
+ - clusters
+ - databases
+ - poolers
+ - publications
+ - scheduledbackups
+ - subscriptions
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups/status
+ - databases/status
+ - publications/status
+ - scheduledbackups/status
+ - subscriptions/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusterimagecatalogs
+ - imagecatalogs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/finalizers
+ - poolers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/status
+ - poolers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ - roles
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-publication-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-publication-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-subscription-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-subscription-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cnpg-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cnpg-manager
+subjects:
+- kind: ServiceAccount
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: v1
+data:
+ queries: |
+ backends:
+ query: |
+ SELECT sa.datname
+ , sa.usename
+ , sa.application_name
+ , states.state
+ , COALESCE(sa.count, 0) AS total
+ , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
+ FROM ( VALUES ('active')
+ , ('idle')
+ , ('idle in transaction')
+ , ('idle in transaction (aborted)')
+ , ('fastpath function call')
+ , ('disabled')
+ ) AS states(state)
+ LEFT JOIN (
+ SELECT datname
+ , state
+ , usename
+ , COALESCE(application_name, '') AS application_name
+ , COUNT(*)
+ , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
+ FROM pg_catalog.pg_stat_activity
+ GROUP BY datname, state, usename, application_name
+ ) sa ON states.state = sa.state
+ WHERE sa.usename IS NOT NULL
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - usename:
+ usage: "LABEL"
+ description: "Name of the user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - state:
+ usage: "LABEL"
+ description: "State of the backend"
+ - total:
+ usage: "GAUGE"
+ description: "Number of backends"
+ - max_tx_duration_seconds:
+ usage: "GAUGE"
+ description: "Maximum duration of a transaction in seconds"
+
+ backends_waiting:
+ query: |
+ SELECT count(*) AS total
+ FROM pg_catalog.pg_locks blocked_locks
+ JOIN pg_catalog.pg_locks blocking_locks
+ ON blocking_locks.locktype = blocked_locks.locktype
+ AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
+ AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
+ AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
+ AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
+ AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
+ AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
+ AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
+ AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
+ AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
+ AND blocking_locks.pid != blocked_locks.pid
+ JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
+ WHERE NOT blocked_locks.granted
+ metrics:
+ - total:
+ usage: "GAUGE"
+ description: "Total number of backends that are currently waiting on other queries"
+
+ pg_database:
+ query: |
+ SELECT datname
+ , pg_catalog.pg_database_size(datname) AS size_bytes
+ , pg_catalog.age(datfrozenxid) AS xid_age
+ , pg_catalog.mxid_age(datminmxid) AS mxid_age
+ FROM pg_catalog.pg_database
+ WHERE datallowconn
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - size_bytes:
+ usage: "GAUGE"
+ description: "Disk space used by the database"
+ - xid_age:
+ usage: "GAUGE"
+ description: "Number of transactions from the frozen XID to the current one"
+ - mxid_age:
+ usage: "GAUGE"
+ description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
+
+ pg_postmaster:
+ query: |
+ SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
+ FROM pg_catalog.pg_postmaster_start_time()
+ metrics:
+ - start_time:
+ usage: "GAUGE"
+ description: "Time at which postgres started (based on epoch)"
+
+ pg_replication:
+ query: "SELECT CASE WHEN (
+ NOT pg_catalog.pg_is_in_recovery()
+ OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_catalog.pg_is_in_recovery() AS in_recovery,
+ EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
+ (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
+ metrics:
+ - lag:
+ usage: "GAUGE"
+ description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
+ - is_wal_receiver_up:
+ usage: "GAUGE"
+ description: "Whether the instance wal_receiver is up"
+ - streaming_replicas:
+ usage: "GAUGE"
+ description: "Number of streaming replicas connected to the instance"
+
+ pg_replication_slots:
+ query: |
+ SELECT slot_name,
+ slot_type,
+ database,
+ active,
+ (CASE pg_catalog.pg_is_in_recovery()
+ WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
+ ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ END) as pg_wal_lsn_diff
+ FROM pg_catalog.pg_replication_slots
+ WHERE NOT temporary
+ metrics:
+ - slot_name:
+ usage: "LABEL"
+ description: "Name of the replication slot"
+ - slot_type:
+ usage: "LABEL"
+ description: "Type of the replication slot"
+ - database:
+ usage: "LABEL"
+ description: "Name of the database"
+ - active:
+ usage: "GAUGE"
+ description: "Flag indicating whether the slot is active"
+ - pg_wal_lsn_diff:
+ usage: "GAUGE"
+ description: "Replication lag in bytes"
+
+ pg_stat_archiver:
+ query: |
+ SELECT archived_count
+ , failed_count
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
+ , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
+ , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_archiver
+ metrics:
+ - archived_count:
+ usage: "COUNTER"
+ description: "Number of WAL files that have been successfully archived"
+ - failed_count:
+ usage: "COUNTER"
+ description: "Number of failed attempts for archiving WAL files"
+ - seconds_since_last_archival:
+ usage: "GAUGE"
+ description: "Seconds since the last successful archival operation"
+ - seconds_since_last_failure:
+ usage: "GAUGE"
+ description: "Seconds since the last failed archival operation"
+ - last_archived_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving succeeded"
+ - last_failed_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving failed"
+ - last_archived_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Archived WAL start LSN"
+ - last_failed_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Last failed WAL LSN"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_bgwriter:
+ runonserver: "<17.0.0"
+ query: |
+ SELECT checkpoints_timed
+ , checkpoints_req
+ , checkpoint_write_time
+ , checkpoint_sync_time
+ , buffers_checkpoint
+ , buffers_clean
+ , maxwritten_clean
+ , buffers_backend
+ , buffers_backend_fsync
+ , buffers_alloc
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - checkpoint_write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
+ - checkpoint_sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
+ - buffers_checkpoint:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints"
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_backend:
+ usage: "COUNTER"
+ description: "Number of buffers written directly by a backend"
+ - buffers_backend_fsync:
+ usage: "COUNTER"
+ description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+
+ pg_stat_bgwriter_17:
+ runonserver: ">=17.0.0"
+ name: pg_stat_bgwriter
+ query: |
+ SELECT buffers_clean
+ , maxwritten_clean
+ , buffers_alloc
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_checkpointer:
+ runonserver: ">=17.0.0"
+ query: |
+ SELECT num_timed AS checkpoints_timed
+ , num_requested AS checkpoints_req
+ , restartpoints_timed
+ , restartpoints_req
+ , restartpoints_done
+ , write_time
+ , sync_time
+ , buffers_written
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_checkpointer
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - restartpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it"
+ - restartpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested restartpoints that have been performed"
+ - restartpoints_done:
+ usage: "COUNTER"
+ description: "Number of restartpoints that have been performed"
+ - write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds"
+ - sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds"
+ - buffers_written:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints and restartpoints"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_database:
+ query: |
+ SELECT datname
+ , xact_commit
+ , xact_rollback
+ , blks_read
+ , blks_hit
+ , tup_returned
+ , tup_fetched
+ , tup_inserted
+ , tup_updated
+ , tup_deleted
+ , conflicts
+ , temp_files
+ , temp_bytes
+ , deadlocks
+ , blk_read_time
+ , blk_write_time
+ FROM pg_catalog.pg_stat_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of this database"
+ - xact_commit:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been committed"
+ - xact_rollback:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been rolled back"
+ - blks_read:
+ usage: "COUNTER"
+ description: "Number of disk blocks read in this database"
+ - blks_hit:
+ usage: "COUNTER"
+ description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
+ - tup_returned:
+ usage: "COUNTER"
+ description: "Number of rows returned by queries in this database"
+ - tup_fetched:
+ usage: "COUNTER"
+ description: "Number of rows fetched by queries in this database"
+ - tup_inserted:
+ usage: "COUNTER"
+ description: "Number of rows inserted by queries in this database"
+ - tup_updated:
+ usage: "COUNTER"
+ description: "Number of rows updated by queries in this database"
+ - tup_deleted:
+ usage: "COUNTER"
+ description: "Number of rows deleted by queries in this database"
+ - conflicts:
+ usage: "COUNTER"
+ description: "Number of queries canceled due to conflicts with recovery in this database"
+ - temp_files:
+ usage: "COUNTER"
+ description: "Number of temporary files created by queries in this database"
+ - temp_bytes:
+ usage: "COUNTER"
+ description: "Total amount of data written to temporary files by queries in this database"
+ - deadlocks:
+ usage: "COUNTER"
+ description: "Number of deadlocks detected in this database"
+ - blk_read_time:
+ usage: "COUNTER"
+ description: "Time spent reading data file blocks by backends in this database, in milliseconds"
+ - blk_write_time:
+ usage: "COUNTER"
+ description: "Time spent writing data file blocks by backends in this database, in milliseconds"
+
+ pg_stat_replication:
+ primary: true
+ query: |
+ SELECT usename
+ , COALESCE(application_name, '') AS application_name
+ , COALESCE(client_addr::text, '') AS client_addr
+ , COALESCE(client_port::text, '') AS client_port
+ , EXTRACT(EPOCH FROM backend_start) AS backend_start
+ , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes
+ , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes
+ , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds
+ FROM pg_catalog.pg_stat_replication
+ metrics:
+ - usename:
+ usage: "LABEL"
+ description: "Name of the replication user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - client_addr:
+ usage: "LABEL"
+ description: "Client IP address"
+ - client_port:
+ usage: "LABEL"
+ description: "Client TCP port"
+ - backend_start:
+ usage: "COUNTER"
+ description: "Time when this process was started"
+ - backend_xmin_age:
+ usage: "COUNTER"
+ description: "The age of this standby's xmin horizon"
+ - sent_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location sent on this connection"
+ - write_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location written to disk by this standby server"
+ - flush_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server"
+ - replay_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server"
+ - write_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it"
+ - flush_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it"
+ - replay_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it"
+
+ pg_settings:
+ query: |
+ SELECT name,
+ CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
+ FROM pg_catalog.pg_settings
+ WHERE vartype IN ('integer', 'real', 'bool')
+ ORDER BY 1
+ metrics:
+ - name:
+ usage: "LABEL"
+ description: "Name of the setting"
+ - setting:
+ usage: "GAUGE"
+ description: "Setting value"
+kind: ConfigMap
+metadata:
+ labels:
+ cnpg.io/reload: ""
+ name: cnpg-default-monitoring
+ namespace: cnpg-system
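The queries above follow the same user-queries format accepted by a Cluster's
monitoring stanza, so extra metrics can be shipped in a separate ConfigMap and
referenced from the Cluster; a minimal sketch, assuming a ConfigMap named
my-custom-queries with the query document stored under the key "queries":

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-example
    spec:
      instances: 3
      storage:
        size: 1Gi
      monitoring:
        customQueriesConfigMap:
          - name: my-custom-queries  # ConfigMap in the cluster's namespace
            key: queries             # key holding the queries document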
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ app.kubernetes.io/name: cloudnative-pg
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-controller-manager
+ namespace: cnpg-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: cloudnative-pg
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ spec:
+ containers:
+ - args:
+ - controller
+ - --leader-elect
+ - --max-concurrent-reconciles=10
+ - --config-map-name=cnpg-controller-manager-config
+ - --secret-name=cnpg-controller-manager-config
+ - --webhook-port=9443
+ command:
+ - /manager
+ env:
+ - name: OPERATOR_IMAGE_NAME
+ value: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MONITORING_QUERIES_CONFIGMAP
+ value: cnpg-default-monitoring
+ image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1
+ livenessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsGroup: 10001
+ runAsUser: 10001
+ seccompProfile:
+ type: RuntimeDefault
+ volumeMounts:
+ - mountPath: /controller
+ name: scratch-data
+ - mountPath: /run/secrets/cnpg.io/webhook
+ name: webhook-certificates
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ serviceAccountName: cnpg-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - emptyDir: {}
+ name: scratch-data
+ - name: webhook-certificates
+ secret:
+ defaultMode: 420
+ optional: true
+ secretName: cnpg-webhook-cert
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: cnpg-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: mbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: mcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: mscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: cnpg-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: vbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: vcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-pooler
+ failurePolicy: Fail
+ name: vpooler.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - poolers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: vscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
From 50a4e496a528d635ece11aa41476cc1b6843e338 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Mon, 9 Dec 2024 14:12:25 +0100
Subject: [PATCH 228/836] chore: update the release script to better handle RC
releases (#6301)
Signed-off-by: Marco Nenciarini
---
hack/release.sh | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/hack/release.sh b/hack/release.sh
index 0c0fc0596b..e82aeabc73 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -96,17 +96,19 @@ KUSTOMIZE="${REPO_ROOT}/bin/kustomize"
mkdir -p releases/
release_manifest="releases/cnpg-${release_version}.yaml"
+# shellcheck disable=SC2001
+release_branch="release-$(sed -e 's/^\([0-9]\+\.[0-9]\+\)\..*$/\1/' <<< "$release_version" )"
# Perform automated substitutions of the version string in the source code
sed -i -e "/Version *= *.*/Is/\".*\"/\"${release_version}\"/" \
-e "/DefaultOperatorImageName *= *.*/Is/\"\(.*\):.*\"/\"\1:${release_version}\"/" \
pkg/versions/versions.go
-sed -i -e "s@release-[0-9.]*/releases/cnpg-[0-9.]*.yaml@${branch}/releases/cnpg-${release_version}.yaml@g" \
- -e "s@artifacts/release-[0-9.]*/@artifacts/${branch}/@g" \
+sed -i -e "s@\(release-[0-9.]\+\|main\)/releases/cnpg-[0-9.]\+\(-rc.*\)\?.yaml@${branch}/releases/cnpg-${release_version}.yaml@g" \
+ -e "s@artifacts/release-[0-9.]*/@artifacts/${release_branch}/@g" \
docs/src/installation_upgrade.md
-sed -i -e "s@1\.[0-9]\+\.[0-9]\+@${release_version}@g" docs/src/kubectl-plugin.md
+sed -i -e "s@1\.[0-9]\+\.[0-9]\+\(-[a-z0-9]\+\)\?@${release_version}@g" docs/src/kubectl-plugin.md
CONFIG_TMP_DIR=$(mktemp -d)
cp -r config/* "${CONFIG_TMP_DIR}"
@@ -123,6 +125,7 @@ git checkout -b "release/v${release_version}"
git add \
pkg/versions/versions.go \
docs/src/installation_upgrade.md \
+ docs/src/kubectl-plugin.md \
"${release_manifest}"
git commit -sm "Version tag to ${release_version}"
git push origin -u "release/v${release_version}"
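For illustration, the branch derivation the new sed expression performs can be sketched in Go (a hypothetical helper, not part of the repository): it keeps only the major.minor prefix of the version and prepends "release-".

package main

import (
	"fmt"
	"regexp"
)

// releaseBranch mirrors the sed expression above:
// sed 's/^\([0-9]\+\.[0-9]\+\)\..*$/\1/' prefixed with "release-".
func releaseBranch(version string) string {
	re := regexp.MustCompile(`^(\d+\.\d+)\..*$`)
	return "release-" + re.ReplaceAllString(version, "$1")
}

func main() {
	fmt.Println(releaseBranch("1.25.0"))     // release-1.25
	fmt.Println(releaseBranch("1.25.0-rc1")) // release-1.25
}

An RC version and its final release therefore both map to the same release branch, which is what the documentation substitutions above rely on.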
From ec06335f708c7365ebe1275b80609321a670bb24 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Mon, 9 Dec 2024 15:21:06 +0100
Subject: [PATCH 229/836] fix: panic recovering from an external server with no
backup configuration (#6300)
The instance manager panicked when recovering from an external server
with no backup configuration.
This patch fixes the panic and, through the validation webhook, prevents
such a configuration from being applied in the first place.
Closes: #6295
Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
api/v1/cluster_webhook.go | 16 ++++++++-
api/v1/cluster_webhook_test.go | 55 ++++++++++++++++++++++++++----
pkg/management/postgres/restore.go | 5 +++
3 files changed, 69 insertions(+), 7 deletions(-)
diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go
index 0d6da62c95..eec7a01f68 100644
--- a/api/v1/cluster_webhook.go
+++ b/api/v1/cluster_webhook.go
@@ -866,7 +866,9 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList {
return result
}
- _, found := r.ExternalCluster(r.Spec.Bootstrap.Recovery.Source)
+ externalCluster, found := r.ExternalCluster(r.Spec.Bootstrap.Recovery.Source)
+
+ // Ensure the existence of the external cluster
if !found {
result = append(
result,
@@ -876,6 +878,18 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList {
fmt.Sprintf("External cluster %v not found", r.Spec.Bootstrap.Recovery.Source)))
}
+ // Ensure the external cluster definition has enough information
+ // to be used to recover a data directory
+ if externalCluster.BarmanObjectStore == nil && externalCluster.PluginConfiguration == nil {
+ result = append(
+ result,
+ field.Invalid(
+ field.NewPath("spec", "bootstrap", "recovery", "source"),
+ r.Spec.Bootstrap.Recovery.Source,
+ fmt.Sprintf("External cluster %v cannot be used for recovery: "+
+ "both Barman and CNPG-i plugin configurations are missing", r.Spec.Bootstrap.Recovery.Source)))
+ }
+
return result
}
diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go
index cdbf5585c2..bd08a26511 100644
--- a/api/v1/cluster_webhook_test.go
+++ b/api/v1/cluster_webhook_test.go
@@ -23,6 +23,7 @@ import (
"strings"
"time"
+ "github.com/cloudnative-pg/barman-cloud/pkg/api"
"github.com/cloudnative-pg/machinery/pkg/image/reference"
pgversion "github.com/cloudnative-pg/machinery/pkg/postgres/version"
storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
@@ -2328,7 +2329,49 @@ var _ = Describe("bootstrap recovery validation", func() {
Expect(result).To(BeEmpty())
})
- It("does not complain when bootstrap recovery source matches one of the names of external clusters", func() {
+ Context("does not complain when bootstrap recovery source matches one of the names of external clusters", func() {
+ When("using a barman object store configuration", func() {
+ recoveryCluster := &Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ Recovery: &BootstrapRecovery{
+ Source: "test",
+ },
+ },
+ ExternalClusters: []ExternalCluster{
+ {
+ Name: "test",
+ BarmanObjectStore: &api.BarmanObjectStoreConfiguration{},
+ },
+ },
+ },
+ }
+ errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ Expect(errorsList).To(BeEmpty())
+ })
+
+ When("using a plugin configuration", func() {
+ recoveryCluster := &Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ Recovery: &BootstrapRecovery{
+ Source: "test",
+ },
+ },
+ ExternalClusters: []ExternalCluster{
+ {
+ Name: "test",
+ PluginConfiguration: &PluginConfiguration{},
+ },
+ },
+ },
+ }
+ errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ Expect(errorsList).To(BeEmpty())
+ })
+ })
+
+ It("complains when bootstrap recovery source does not match one of the names of external clusters", func() {
recoveryCluster := &Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
@@ -2338,16 +2381,16 @@ var _ = Describe("bootstrap recovery validation", func() {
},
ExternalClusters: []ExternalCluster{
{
- Name: "test",
+ Name: "another-test",
},
},
},
}
errorsList := recoveryCluster.validateBootstrapRecoverySource()
- Expect(errorsList).To(BeEmpty())
+ Expect(errorsList).ToNot(BeEmpty())
})
- It("complains when bootstrap recovery source does not match one of the names of external clusters", func() {
+ It("complains when bootstrap recovery source have no BarmanObjectStore nor plugin configuration", func() {
recoveryCluster := &Cluster{
Spec: ClusterSpec{
Bootstrap: &BootstrapConfiguration{
@@ -2357,13 +2400,13 @@ var _ = Describe("bootstrap recovery validation", func() {
},
ExternalClusters: []ExternalCluster{
{
- Name: "another-test",
+ Name: "test",
},
},
},
}
errorsList := recoveryCluster.validateBootstrapRecoverySource()
- Expect(errorsList).ToNot(BeEmpty())
+ Expect(errorsList).To(HaveLen(1))
})
})
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index 2cdad8c8ea..979ed57f53 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -511,6 +511,11 @@ func (info InitInfo) loadBackupObjectFromExternalCluster(
if !found {
return nil, nil, fmt.Errorf("missing external cluster: %v", sourceName)
}
+
+ if server.BarmanObjectStore == nil {
+ return nil, nil, fmt.Errorf("missing barman object store configuration for source: %v", sourceName)
+ }
+
serverName := server.GetServerName()
env, err := barmanCredentials.EnvSetRestoreCloudCredentials(
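The guard added in both the webhook and the restore path boils down to one predicate: an external cluster can seed a recovery only if it carries a Barman object store or a CNPG-I plugin configuration. A hedged, self-contained sketch of that check, using trimmed stand-in types rather than the real API structs:

package main

import "fmt"

// Stand-ins for api.BarmanObjectStoreConfiguration and PluginConfiguration.
type barmanConfig struct{}
type pluginConfig struct{}

type externalCluster struct {
	Name                string
	BarmanObjectStore   *barmanConfig
	PluginConfiguration *pluginConfig
}

// validateRecoverySource rejects external clusters that carry neither
// configuration, mirroring the webhook check in the patch above.
func validateRecoverySource(c externalCluster) error {
	if c.BarmanObjectStore == nil && c.PluginConfiguration == nil {
		return fmt.Errorf("external cluster %v cannot be used for recovery: "+
			"both Barman and CNPG-i plugin configurations are missing", c.Name)
	}
	return nil
}

func main() {
	if err := validateRecoverySource(externalCluster{Name: "test"}); err != nil {
		fmt.Println(err)
	}
}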
From bd6e545aa5c7833154de259c142ccc9c0203e6c9 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 11 Dec 2024 14:01:43 +0100
Subject: [PATCH 230/836] fix: deadlock when a plugin is used multiple times
(#6309)
The operator now requests one connection per plugin in use, even when
the same plugin has been requested multiple times.
This fixes a deadlock: the operator was acquiring multiple connections
to the same plugin and, under high concurrency, could get stuck without
releasing the ones it had already taken.
Closes: #6310
---------
Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
internal/cnpi/plugin/client/client.go | 11 ++++++++++-
internal/cnpi/plugin/repository/connection.go | 19 +++++++++++++++++--
internal/cnpi/plugin/repository/setup.go | 10 ++++++++++
internal/controller/cluster_controller.go | 6 +++++-
4 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go
index 9d62c1a7a7..e13515668c 100644
--- a/internal/cnpi/plugin/client/client.go
+++ b/internal/cnpi/plugin/client/client.go
@@ -20,6 +20,7 @@ import (
"context"
"github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
@@ -83,7 +84,15 @@ func WithPlugins(ctx context.Context, repository repository.Interface, names ...
result := &data{
repository: repository,
}
- if err := result.load(ctx, names...); err != nil {
+
+ // The following ensures that each plugin is loaded just one
+ // time, even when the same plugin has been requested multiple
+ // times.
+ loadingPlugins := stringset.From(names)
+ uniqueSortedPluginName := loadingPlugins.ToSortedList()
+
+ if err := result.load(ctx, uniqueSortedPluginName...); err != nil {
+ result.Close(ctx)
return nil, err
}
diff --git a/internal/cnpi/plugin/repository/connection.go b/internal/cnpi/plugin/repository/connection.go
index 3a563ac6a5..fb8e478c70 100644
--- a/internal/cnpi/plugin/repository/connection.go
+++ b/internal/cnpi/plugin/repository/connection.go
@@ -26,6 +26,10 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
)
+// maxConnectionAttempts is the maximum number of connection attempts to a
+// plugin. maxConnectionAttempts should be greater than or equal to maxPoolSize
+const maxConnectionAttempts = 5
+
type releasingConnection struct {
connection.Interface
closer func() error
@@ -51,7 +55,7 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
var resource *puddle.Resource[connection.Interface]
var err error
- for i := 0; i < maxPoolSize; i++ {
+ for i := 0; i < maxConnectionAttempts; i++ {
contextLogger.Trace("try getting connection")
resource, err = pool.Acquire(ctx)
if err != nil {
@@ -60,7 +64,10 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
err = resource.Value().Ping(ctx)
if err != nil {
- contextLogger.Debug("Detected plugin connection error, closing the connection and trying again")
+ contextLogger.Info(
+ "Detected stale/broken plugin connection, closing and trying again",
+ "pluginName", name,
+ "err", err)
resource.Destroy()
} else {
break
@@ -71,9 +78,17 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
return nil, fmt.Errorf("while getting plugin connection: %w", err)
}
+ contextLogger.Trace(
+ "Acquired logical plugin connection",
+ "name", name,
+ )
return &releasingConnection{
Interface: resource.Value(),
closer: func() error {
+ contextLogger.Trace(
+ "Released logical plugin connection",
+ "name", name,
+ )
// When the client has done its job with a plugin connection, it
// will be returned to the pool
resource.Release()
diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go
index e43b5f1091..ca06824075 100644
--- a/internal/cnpi/plugin/repository/setup.go
+++ b/internal/cnpi/plugin/repository/setup.go
@@ -60,6 +60,8 @@ type data struct {
pluginConnectionPool map[string]*puddle.Pool[connection.Interface]
}
+// maxPoolSize is the maximum number of connections in a plugin's connection
+// pool
const maxPoolSize = 5
func (r *data) setPluginProtocol(name string, protocol connection.Protocol) error {
@@ -92,6 +94,8 @@ func (r *data) setPluginProtocol(name string, protocol connection.Protocol) erro
WithValues("pluginName", name)
ctx = log.IntoContext(ctx, constructorLogger)
+ constructorLogger.Trace("Acquired physical plugin connection")
+
if handler, err = protocol.Dial(ctx); err != nil {
constructorLogger.Error(err, "Got error while connecting to plugin")
return nil, err
@@ -101,6 +105,12 @@ func (r *data) setPluginProtocol(name string, protocol connection.Protocol) erro
}
destructor := func(res connection.Interface) {
+ constructorLogger := log.
+ FromContext(context.Background()).
+ WithName("setPluginProtocol").
+ WithValues("pluginName", name)
+ constructorLogger.Trace("Released physical plugin connection")
+
err := res.Close()
if err != nil {
destructorLogger := log.FromContext(context.Background()).
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index fb2705f093..6194505985 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -177,7 +177,11 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
// Load the plugins required to bootstrap and reconcile this cluster
enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...)
- pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, enabledPluginNames...)
+
+ pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second)
+ defer cancelPluginLoading()
+
+ pluginClient, err := cnpgiClient.WithPlugins(pluginLoadingContext, r.Plugins, enabledPluginNames...)
if err != nil {
var errUnknownPlugin *repository.ErrUnknownPlugin
if errors.As(err, &errUnknownPlugin) {
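The essence of the fix is the de-duplication step: WithPlugins now loads each distinct plugin name exactly once, so a plugin referenced both in spec.plugins and in an external cluster no longer consumes two pool slots. A standard-library-only sketch of that step (the real code uses the machinery stringset package):

package main

import (
	"fmt"
	"sort"
)

// uniqueSorted collapses duplicate plugin names and returns them in a
// deterministic order, as stringset.From(...).ToSortedList() does.
func uniqueSorted(names []string) []string {
	seen := make(map[string]struct{}, len(names))
	for _, n := range names {
		seen[n] = struct{}{}
	}
	out := make([]string, 0, len(seen))
	for n := range seen {
		out = append(out, n)
	}
	sort.Strings(out)
	return out
}

func main() {
	// A plugin enabled twice is loaded once and takes a single pool slot.
	fmt.Println(uniqueSorted([]string{"backup-plugin", "backup-plugin", "audit"}))
	// Output: [audit backup-plugin]
}

The reconciler additionally bounds the whole loading phase with a five-second context timeout, so a stuck plugin can no longer wedge the reconcile loop indefinitely.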
From da2d0341a4a47a4e7e40950acde3ff501943b79d Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 11 Dec 2024 14:32:37 +0100
Subject: [PATCH 231/836] chore(refactor): isolate plugin loading function
(#6312)
Signed-off-by: Leonardo Cecchi
---
internal/cnpi/plugin/client/client.go | 26 +++++++++----------
internal/cnpi/plugin/repository/connection.go | 2 +-
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go
index e13515668c..100a0e027b 100644
--- a/internal/cnpi/plugin/client/client.go
+++ b/internal/cnpi/plugin/client/client.go
@@ -43,18 +43,6 @@ func (data *data) getPlugin(pluginName string) (connection.Interface, error) {
return nil, ErrPluginNotLoaded
}
-func (data *data) load(ctx context.Context, names ...string) error {
- for _, name := range names {
- pluginData, err := data.repository.GetConnection(ctx, name)
- if err != nil {
- return err
- }
-
- data.plugins = append(data.plugins, pluginData)
- }
- return nil
-}
-
func (data *data) MetadataList() []connection.Metadata {
result := make([]connection.Metadata, len(data.plugins))
for i := range data.plugins {
@@ -85,13 +73,25 @@ func WithPlugins(ctx context.Context, repository repository.Interface, names ...
repository: repository,
}
+ load := func(names ...string) error {
+ for _, name := range names {
+ pluginData, err := result.repository.GetConnection(ctx, name)
+ if err != nil {
+ return err
+ }
+
+ result.plugins = append(result.plugins, pluginData)
+ }
+ return nil
+ }
+
// The following ensures that each plugin is loaded just one
// time, even when the same plugin has been requested multiple
// times.
loadingPlugins := stringset.From(names)
uniqueSortedPluginName := loadingPlugins.ToSortedList()
- if err := result.load(ctx, uniqueSortedPluginName...); err != nil {
+ if err := load(uniqueSortedPluginName...); err != nil {
result.Close(ctx)
return nil, err
}
diff --git a/internal/cnpi/plugin/repository/connection.go b/internal/cnpi/plugin/repository/connection.go
index fb8e478c70..a71586f530 100644
--- a/internal/cnpi/plugin/repository/connection.go
+++ b/internal/cnpi/plugin/repository/connection.go
@@ -64,7 +64,7 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
err = resource.Value().Ping(ctx)
if err != nil {
- contextLogger.Info(
+ contextLogger.Warning(
"Detected stale/broken plugin connection, closing and trying again",
"pluginName", name,
"err", err)
From e6cdceaeff438bf1892d6a504d268526bb0befda Mon Sep 17 00:00:00 2001
From: Jonathan Battiato
Date: Thu, 12 Dec 2024 10:41:44 +0100
Subject: [PATCH 232/836] chore(olm): improve scorecard test implementing
suggestions (#6106)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This patch adds the missing status fields to the `olm-samples`, as
reported in the "Suggestions" section of the olm-scorecard tests.
Closes #5710
Signed-off-by: Jonathan Battiato
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Niccolò Fei
Co-authored-by: Jonathan Gonzalez V.
Co-authored-by: Niccolò Fei
---
.../cloudnative-pg.clusterserviceversion.yaml | 21 +++++++++++++++++++
config/olm-samples/postgresql_v1_backup.yaml | 2 ++
config/olm-samples/postgresql_v1_cluster.yaml | 2 ++
.../postgresql_v1_clusterimagecatalog.yaml | 1 +
.../olm-samples/postgresql_v1_database.yaml | 2 ++
.../postgresql_v1_imagecatalog.yaml | 1 +
config/olm-samples/postgresql_v1_pooler.yaml | 2 ++
.../postgresql_v1_publication.yaml | 2 ++
.../postgresql_v1_scheduledbackup.yaml | 2 ++
.../postgresql_v1_subscription.yaml | 2 ++
10 files changed, 37 insertions(+)
diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
index fc286e39e7..b78e7a927d 100644
--- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
+++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
@@ -894,6 +894,13 @@ spec:
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:text'
- 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ statusDescriptors:
+ - path: applied
+ displayName: Applied
+ description: Applied is true if the database was reconciled correctly
+ - path: message
+ displayName: Message
+ description: Message is the reconciliation output message
- kind: Publication
name: publications.postgresql.cnpg.io
displayName: Postgres Publication
@@ -919,6 +926,13 @@ spec:
- path: publicationReclaimPolicy
displayName: Publication reclaim policy
description: Specifies the action to take for the publication inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management.
+ statusDescriptors:
+ - path: applied
+ displayName: Applied
+ description: Applied is true if the publication was reconciled correctly
+ - path: message
+ displayName: Message
+ description: Message is the reconciliation output message
- kind: Subscription
name: subscriptions.postgresql.cnpg.io
displayName: Postgres Subscription
@@ -950,3 +964,10 @@ spec:
- path: subscriptionReclaimPolicy
displayName: Subscription reclaim policy
description: Specifies the action to take for the subscription inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management.
+ statusDescriptors:
+ - path: applied
+ displayName: Applied
+ description: Applied is true if the subscription was reconciled correctly
+ - path: message
+ displayName: Message
+ description: Message is the reconciliation output message
diff --git a/config/olm-samples/postgresql_v1_backup.yaml b/config/olm-samples/postgresql_v1_backup.yaml
index 330ede8589..40147cec70 100644
--- a/config/olm-samples/postgresql_v1_backup.yaml
+++ b/config/olm-samples/postgresql_v1_backup.yaml
@@ -5,3 +5,5 @@ metadata:
spec:
cluster:
name: cluster-sample
+status:
+ serverName:
diff --git a/config/olm-samples/postgresql_v1_cluster.yaml b/config/olm-samples/postgresql_v1_cluster.yaml
index 0a8204977b..40f324f07b 100644
--- a/config/olm-samples/postgresql_v1_cluster.yaml
+++ b/config/olm-samples/postgresql_v1_cluster.yaml
@@ -19,3 +19,5 @@ spec:
walStorage:
size: 1Gi
logLevel: info
+status:
+ instances: 3
diff --git a/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml b/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml
index 20e725876d..3ad7041ea2 100644
--- a/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml
+++ b/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml
@@ -1,3 +1,4 @@
+apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog
metadata:
name: postgresql
diff --git a/config/olm-samples/postgresql_v1_database.yaml b/config/olm-samples/postgresql_v1_database.yaml
index b4d3d56b4d..748cb1ee7a 100644
--- a/config/olm-samples/postgresql_v1_database.yaml
+++ b/config/olm-samples/postgresql_v1_database.yaml
@@ -7,3 +7,5 @@ spec:
owner: app
cluster:
name: cluster-sample
+status:
+ applied: false
diff --git a/config/olm-samples/postgresql_v1_imagecatalog.yaml b/config/olm-samples/postgresql_v1_imagecatalog.yaml
index faf6d60a42..f141f90691 100644
--- a/config/olm-samples/postgresql_v1_imagecatalog.yaml
+++ b/config/olm-samples/postgresql_v1_imagecatalog.yaml
@@ -1,3 +1,4 @@
+apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
name: postgresql
diff --git a/config/olm-samples/postgresql_v1_pooler.yaml b/config/olm-samples/postgresql_v1_pooler.yaml
index 0400ed54c2..1ba730bb17 100644
--- a/config/olm-samples/postgresql_v1_pooler.yaml
+++ b/config/olm-samples/postgresql_v1_pooler.yaml
@@ -9,3 +9,5 @@ spec:
type: rw
pgbouncer:
poolMode: session
+status:
+ instances: 1
diff --git a/config/olm-samples/postgresql_v1_publication.yaml b/config/olm-samples/postgresql_v1_publication.yaml
index 598c02a2bb..89a54cbac6 100644
--- a/config/olm-samples/postgresql_v1_publication.yaml
+++ b/config/olm-samples/postgresql_v1_publication.yaml
@@ -9,3 +9,5 @@ spec:
name: cluster-sample
target:
allTables: true
+status:
+ applied: false
diff --git a/config/olm-samples/postgresql_v1_scheduledbackup.yaml b/config/olm-samples/postgresql_v1_scheduledbackup.yaml
index bd2350fddc..6e61b15d9f 100644
--- a/config/olm-samples/postgresql_v1_scheduledbackup.yaml
+++ b/config/olm-samples/postgresql_v1_scheduledbackup.yaml
@@ -6,3 +6,5 @@ spec:
schedule: "0 0 0 * * *"
cluster:
name: cluster-sample
+status:
+ lastCheckTime:
diff --git a/config/olm-samples/postgresql_v1_subscription.yaml b/config/olm-samples/postgresql_v1_subscription.yaml
index ecc016619b..6047977c3e 100644
--- a/config/olm-samples/postgresql_v1_subscription.yaml
+++ b/config/olm-samples/postgresql_v1_subscription.yaml
@@ -9,3 +9,5 @@ spec:
cluster:
name: cluster-sample-dest
externalClusterName: cluster-sample
+status:
+ applied: false
From 8494165d8bd5ffce0e4d8cd4d4343b90d59621a2 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Thu, 12 Dec 2024 15:04:57 +0100
Subject: [PATCH 233/836] tests: add unit tests to publication and subscription
controllers (#6284)
Closes #6267
Signed-off-by: Jaime Silvela
Signed-off-by: wolfox
Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Co-authored-by: wolfox
Co-authored-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
---
.../controller/database_controller.go | 9 +-
.../controller/database_controller_test.go | 373 +++++++---------
.../controller/publication_controller.go | 7 +-
.../controller/publication_controller_sql.go | 2 +-
.../controller/publication_controller_test.go | 368 ++++++++++++++++
.../controller/subscription_controller.go | 12 +-
.../controller/subscription_controller_sql.go | 2 +-
.../subscription_controller_test.go | 397 ++++++++++++++++++
8 files changed, 943 insertions(+), 227 deletions(-)
create mode 100644 internal/management/controller/publication_controller_test.go
create mode 100644 internal/management/controller/subscription_controller_test.go
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index 7fdbf5ba22..ad9ed1b14e 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+ "database/sql"
"fmt"
"time"
@@ -39,6 +40,7 @@ type DatabaseReconciler struct {
instance instanceInterface
finalizerReconciler *finalizerReconciler[*apiv1.Database]
+ getSuperUserDB func() (*sql.DB, error)
}
// databaseReconciliationInterval is the time between the
@@ -143,7 +145,7 @@ func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1
if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete {
return nil
}
- sqlDB, err := r.instance.GetSuperUserDB()
+ sqlDB, err := r.getSuperUserDB()
if err != nil {
return fmt.Errorf("while getting DB connection: %w", err)
}
@@ -159,6 +161,9 @@ func NewDatabaseReconciler(
dr := &DatabaseReconciler{
Client: mgr.GetClient(),
instance: instance,
+ getSuperUserDB: func() (*sql.DB, error) {
+ return instance.GetSuperUserDB()
+ },
}
dr.finalizerReconciler = newFinalizerReconciler(
@@ -184,7 +189,7 @@ func (r *DatabaseReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, er
}
func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.Database) error {
- db, err := r.instance.GetSuperUserDB()
+ db, err := r.getSuperUserDB()
if err != nil {
return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err)
}
diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go
index d41d7eab57..be9c8487cf 100644
--- a/internal/management/controller/database_controller_test.go
+++ b/internal/management/controller/database_controller_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package controller
import (
+ "context"
"database/sql"
"fmt"
@@ -40,14 +41,9 @@ import (
. "github.com/onsi/gomega"
)
-type fakeInstanceData struct {
- *postgres.Instance
- db *sql.DB
-}
-
-func (f *fakeInstanceData) GetSuperUserDB() (*sql.DB, error) {
- return f.db, nil
-}
+const databaseDetectionQuery = `SELECT count(*)
+ FROM pg_database
+ WHERE datname = $1`
var _ = Describe("Managed Database status", func() {
var (
@@ -95,11 +91,6 @@ var _ = Describe("Managed Database status", func() {
WithPodName("cluster-example-1").
WithClusterName("cluster-example")
- f := fakeInstanceData{
- Instance: pgInstance,
- db: db,
- }
-
fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
WithObjects(cluster, database).
WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Database{}).
@@ -108,7 +99,10 @@ var _ = Describe("Managed Database status", func() {
r = &DatabaseReconciler{
Client: fakeClient,
Scheme: schemeBuilder.BuildWithAllKnownScheme(),
- instance: &f,
+ instance: pgInstance,
+ getSuperUserDB: func() (*sql.DB, error) {
+ return db, nil
+ },
}
r.finalizerReconciler = newFinalizerReconciler(
fakeClient,
@@ -122,193 +116,160 @@ var _ = Describe("Managed Database status", func() {
})
It("adds finalizer and sets status ready on success", func(ctx SpecContext) {
- Expect(database.Finalizers).To(BeEmpty())
-
- // Mocking DetectDB
expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
- dbMock.ExpectQuery(`SELECT count(*)
- FROM pg_database
- WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue)
+ dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name).
+ WillReturnRows(expectedValue)
- // Mocking CreateDB
expectedCreate := sqlmock.NewResult(0, 1)
expectedQuery := fmt.Sprintf(
"CREATE DATABASE %s OWNER %s",
- pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
+ pgx.Identifier{database.Spec.Name}.Sanitize(),
+ pgx.Identifier{database.Spec.Owner}.Sanitize(),
)
dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
- // Reconcile and get the updated object
- _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
-
- var updatedDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &updatedDatabase)
+ err := reconcileDatabase(ctx, fakeClient, r, database)
Expect(err).ToNot(HaveOccurred())
- Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeTrue()))
- Expect(updatedDatabase.Status.Message).Should(BeEmpty())
- Expect(updatedDatabase.Finalizers).NotTo(BeEmpty())
+ Expect(database.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(database.GetStatusMessage()).Should(BeEmpty())
+ Expect(database.GetFinalizers()).NotTo(BeEmpty())
})
It("database object inherits error after patching", func(ctx SpecContext) {
- // Mocking DetectDB
+ expectedError := fmt.Errorf("no permission")
expectedValue := sqlmock.NewRows([]string{""}).AddRow("1")
- dbMock.ExpectQuery(`SELECT count(*)
- FROM pg_database
- WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue)
+ dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name).
+ WillReturnRows(expectedValue)
- // Mocking Alter Database
- expectedError := fmt.Errorf("no permission")
expectedQuery := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s",
pgx.Identifier{database.Spec.Name}.Sanitize(),
pgx.Identifier{database.Spec.Owner}.Sanitize(),
)
dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError)
- // Reconcile and get the updated object
- _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
-
- var updatedDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &updatedDatabase)
+ err := reconcileDatabase(ctx, fakeClient, r, database)
Expect(err).ToNot(HaveOccurred())
- Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse()))
- Expect(updatedDatabase.Status.Message).Should(ContainSubstring(expectedError.Error()))
+ Expect(database.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(database.GetStatusMessage()).Should(ContainSubstring(expectedError.Error()))
})
- It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) {
- Expect(database.Finalizers).To(BeEmpty())
-
- // Mocking DetectDB
- expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
- dbMock.ExpectQuery(`SELECT count(*)
- FROM pg_database
- WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue)
-
- // Mocking CreateDB
- expectedCreate := sqlmock.NewResult(0, 1)
- expectedQuery := fmt.Sprintf(
- "CREATE DATABASE %s OWNER %s",
- pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
- )
- dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
-
- // Reconcile and get the updated object
- _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
-
- var updatedDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &updatedDatabase)
- Expect(err).ToNot(HaveOccurred())
-
- Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeTrue()))
- Expect(updatedDatabase.Status.Message).Should(BeEmpty())
- Expect(updatedDatabase.Finalizers).NotTo(BeEmpty())
-
- // the next 3 lines are a hacky bit to make sure the next reconciler
- // call doesn't skip on account of Generation == ObservedGeneration.
- // See fake.Client known issues with `Generation`
- // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
- currentDatabase := updatedDatabase.DeepCopy()
- updatedDatabase.Status.ObservedGeneration = 2
- Expect(fakeClient.Status().Patch(ctx, &updatedDatabase, client.MergeFrom(currentDatabase))).To(Succeed())
-
- // We now look at the behavior when we delete the Database object
- Expect(fakeClient.Delete(ctx, database)).To(Succeed())
-
- // the Database object is Deleted, but its finalizer prevents removal from
- // the API
- var fadingDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &fadingDatabase)
- Expect(err).ToNot(HaveOccurred())
- Expect(fadingDatabase.DeletionTimestamp).NotTo(BeZero())
- Expect(fadingDatabase.Finalizers).NotTo(BeEmpty())
-
- // Mocking Drop Database
- expectedDrop := fmt.Sprintf("DROP DATABASE IF EXISTS %s",
- pgx.Identifier{database.Spec.Name}.Sanitize(),
- )
- dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1))
-
- // Reconcile and get the updated object
- _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
+ When("reclaim policy is delete", func() {
+ It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) {
+ // Mocking DetectDB
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking CreateDB
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE DATABASE %s OWNER %s",
+ pgx.Identifier{database.Spec.Name}.Sanitize(),
+ pgx.Identifier{database.Spec.Owner}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ // Mocking Drop Database
+ expectedDrop := fmt.Sprintf("DROP DATABASE IF EXISTS %s",
+ pgx.Identifier{database.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1))
+
+ err := reconcileDatabase(ctx, fakeClient, r, database)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(database.GetFinalizers()).NotTo(BeEmpty())
+ Expect(database.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(database.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ database.SetGeneration(database.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, database)).To(Succeed())
+
+ // We now look at the behavior when we delete the Database object
+ Expect(fakeClient.Delete(ctx, database)).To(Succeed())
+
+ err = reconcileDatabase(ctx, fakeClient, r, database)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
- var finalDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &finalDatabase)
- Expect(err).To(HaveOccurred())
- Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ When("reclaim policy is retain", func() {
+ It("on deletion it removes finalizers and does NOT drop the DB", func(ctx SpecContext) {
+ database.Spec.ReclaimPolicy = apiv1.DatabaseReclaimRetain
+ Expect(fakeClient.Update(ctx, database)).To(Succeed())
+
+ // Mocking DetectDB
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking CreateDB
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE DATABASE %s OWNER %s",
+ pgx.Identifier{database.Spec.Name}.Sanitize(),
+ pgx.Identifier{database.Spec.Owner}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ err := reconcileDatabase(ctx, fakeClient, r, database)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(database.GetFinalizers()).NotTo(BeEmpty())
+ Expect(database.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(database.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ database.SetGeneration(database.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, database)).To(Succeed())
+
+ // We now look at the behavior when we delete the Database object
+ Expect(fakeClient.Delete(ctx, database)).To(Succeed())
+
+ err = reconcileDatabase(ctx, fakeClient, r, database)
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
})
It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
- // since the fakeClient has the `cluster-example` cluster, let's reference
+ // Since the fakeClient has the `cluster-example` cluster, let's reference
// another cluster `cluster-other` that is not found by the fakeClient
pgInstance := postgres.NewInstance().
WithNamespace("default").
WithPodName("cluster-other-1").
WithClusterName("cluster-other")
- f := fakeInstanceData{
- Instance: pgInstance,
- db: db,
- }
-
r = &DatabaseReconciler{
Client: fakeClient,
Scheme: schemeBuilder.BuildWithAllKnownScheme(),
- instance: &f,
+ instance: pgInstance,
+ getSuperUserDB: func() (*sql.DB, error) {
+ return db, nil
+ },
}
- // patching the Database object to reference the newly created Cluster
- originalDatabase := database.DeepCopy()
+ // Updating the Database object to reference the newly created Cluster
database.Spec.ClusterRef.Name = "cluster-other"
- Expect(fakeClient.Patch(ctx, database, client.MergeFrom(originalDatabase))).To(Succeed())
-
- // Reconcile and get the updated object
- _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
+ Expect(fakeClient.Update(ctx, database)).To(Succeed())
- var updatedDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &updatedDatabase)
+ err := reconcileDatabase(ctx, fakeClient, r, database)
Expect(err).ToNot(HaveOccurred())
- Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse()))
- Expect(updatedDatabase.Status.Message).Should(ContainSubstring(`"cluster-other" not found`))
+ Expect(database.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(database.Status.Message).Should(ContainSubstring(
+ fmt.Sprintf("%q not found", database.Spec.ClusterRef.Name)))
})
It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) {
@@ -334,13 +295,16 @@ var _ = Describe("Managed Database status", func() {
Name: otherDatabase.Name,
}})
- // Expect the reconciler to exit silently since the object doesn't exist
+ // Expect the reconciler to exit silently, since the object doesn't exist
Expect(err).ToNot(HaveOccurred())
Expect(result).Should(BeZero()) // nothing to do, since the DB is being deleted
})
It("drops database with ensure absent option", func(ctx SpecContext) {
- // Mocking dropDatabase
+ // Update the obj to set EnsureAbsent
+ database.Spec.Ensure = apiv1.EnsureAbsent
+ Expect(fakeClient.Update(ctx, database)).To(Succeed())
+
expectedValue := sqlmock.NewResult(0, 1)
expectedQuery := fmt.Sprintf(
"DROP DATABASE IF EXISTS %s",
@@ -348,21 +312,7 @@ var _ = Describe("Managed Database status", func() {
)
dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue)
- // Update the obj to set EnsureAbsent
- database.Spec.Ensure = apiv1.EnsureAbsent
- Expect(fakeClient.Update(ctx, database)).To(Succeed())
-
- // Reconcile and get the updated object
- _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
-
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, database)
+ err := reconcileDatabase(ctx, fakeClient, r, database)
Expect(err).ToNot(HaveOccurred())
Expect(database.Status.Applied).To(HaveValue(BeTrue()))
@@ -371,26 +321,11 @@ var _ = Describe("Managed Database status", func() {
})
It("marks as failed if the target Database is already being managed", func(ctx SpecContext) {
- // The Database obj currently managing "test-database"
- currentManager := &apiv1.Database{
- ObjectMeta: metav1.ObjectMeta{
- Name: "current-manager",
- Namespace: "default",
- },
- Spec: apiv1.DatabaseSpec{
- ClusterRef: corev1.LocalObjectReference{
- Name: cluster.Name,
- },
- Name: "test-database",
- Owner: "app",
- },
- Status: apiv1.DatabaseStatus{
- Applied: ptr.To(true),
- ObservedGeneration: 1,
- },
- }
+ // Let's force the database to have a past reconciliation
+ database.Status.ObservedGeneration = 2
+ Expect(fakeClient.Status().Update(ctx, database)).To(Succeed())
- // A new Database Object targeting the same "test-database"
+ // A new Database Object targeting the same "db-one"
dbDuplicate := &apiv1.Database{
ObjectMeta: metav1.ObjectMeta{
Name: "db-duplicate",
@@ -401,29 +336,19 @@ var _ = Describe("Managed Database status", func() {
ClusterRef: corev1.LocalObjectReference{
Name: cluster.Name,
},
- Name: "test-database",
+ Name: "db-one",
Owner: "app",
},
}
- Expect(fakeClient.Create(ctx, currentManager)).To(Succeed())
+	// The "database" fixture created in BeforeEach already manages "db-one"
Expect(fakeClient.Create(ctx, dbDuplicate)).To(Succeed())
- // Reconcile and get the updated object
- _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: dbDuplicate.Namespace,
- Name: dbDuplicate.Name,
- }})
- Expect(err).ToNot(HaveOccurred())
-
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: dbDuplicate.Namespace,
- Name: dbDuplicate.Name,
- }, dbDuplicate)
+ err := reconcileDatabase(ctx, fakeClient, r, dbDuplicate)
Expect(err).ToNot(HaveOccurred())
expectedError := fmt.Sprintf("%q is already managed by object %q",
- dbDuplicate.Spec.Name, currentManager.Name)
+ dbDuplicate.Spec.Name, database.Name)
Expect(dbDuplicate.Status.Applied).To(HaveValue(BeFalse()))
Expect(dbDuplicate.Status.Message).To(ContainSubstring(expectedError))
Expect(dbDuplicate.Status.ObservedGeneration).To(BeZero())
@@ -436,20 +361,28 @@ var _ = Describe("Managed Database status", func() {
}
Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed())
- _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
- Namespace: database.Namespace,
- Name: database.Spec.Name,
- }})
+ err := reconcileDatabase(ctx, fakeClient, r, database)
Expect(err).ToNot(HaveOccurred())
- var updatedDatabase apiv1.Database
- err = fakeClient.Get(ctx, client.ObjectKey{
- Namespace: database.Namespace,
- Name: database.Name,
- }, &updatedDatabase)
- Expect(err).ToNot(HaveOccurred())
-
- Expect(updatedDatabase.Status.Applied).Should(BeNil())
- Expect(updatedDatabase.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
+ Expect(database.Status.Applied).Should(BeNil())
+ Expect(database.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
})
})
+
+func reconcileDatabase(
+ ctx context.Context,
+ fakeClient client.Client,
+ r *DatabaseReconciler,
+ database *apiv1.Database,
+) error {
+ GinkgoT().Helper()
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: database.GetNamespace(),
+ Name: database.GetName(),
+ }})
+ Expect(err).ToNot(HaveOccurred())
+ return fakeClient.Get(ctx, client.ObjectKey{
+ Namespace: database.GetNamespace(),
+ Name: database.GetName(),
+ }, database)
+}
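The key testability change in this patch is the injection seam: the reconciler reaches its *sql.DB through a function field, so production code wires it to the instance connection while tests substitute a sqlmock handle. A hedged, simplified sketch of the same seam, with stand-in names rather than the real reconciler:

package main

import (
	"database/sql"
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

type reconciler struct {
	// getDB is the seam: production sets it to the instance pool,
	// tests set it to a sqlmock connection.
	getDB func() (*sql.DB, error)
}

func (r *reconciler) databaseExists(name string) (bool, error) {
	db, err := r.getDB()
	if err != nil {
		return false, err
	}
	var count int
	err = db.QueryRow(
		`SELECT count(*) FROM pg_database WHERE datname = $1`, name,
	).Scan(&count)
	return count > 0, err
}

func main() {
	db, mock, _ := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
	mock.ExpectQuery(`SELECT count(*) FROM pg_database WHERE datname = $1`).
		WithArgs("app").
		WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(1))

	r := &reconciler{getDB: func() (*sql.DB, error) { return db, nil }}
	fmt.Println(r.databaseExists("app")) // true <nil>
}

QueryMatcherEqual makes each expectation match the SQL text verbatim, which is why the tests above extract the detection query into a shared constant instead of repeating the literal.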
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
index d268367f1e..06fb6dad6a 100644
--- a/internal/management/controller/publication_controller.go
+++ b/internal/management/controller/publication_controller.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+ "database/sql"
"fmt"
"time"
@@ -39,6 +40,7 @@ type PublicationReconciler struct {
instance *postgres.Instance
finalizerReconciler *finalizerReconciler[*apiv1.Publication]
+ getDB func(name string) (*sql.DB, error)
}
// publicationReconciliationInterval is the time between the
@@ -153,7 +155,7 @@ func (r *PublicationReconciler) evaluateDropPublication(ctx context.Context, pub
if pub.Spec.ReclaimPolicy != apiv1.PublicationReclaimDelete {
return nil
}
- db, err := r.instance.ConnectionPool().Connection(pub.Spec.DBName)
+ db, err := r.getDB(pub.Spec.DBName)
if err != nil {
return fmt.Errorf("while getting DB connection: %w", err)
}
@@ -169,6 +171,9 @@ func NewPublicationReconciler(
pr := &PublicationReconciler{
Client: mgr.GetClient(),
instance: instance,
+ getDB: func(name string) (*sql.DB, error) {
+ return instance.ConnectionPool().Connection(name)
+ },
}
pr.finalizerReconciler = newFinalizerReconciler(
diff --git a/internal/management/controller/publication_controller_sql.go b/internal/management/controller/publication_controller_sql.go
index e179e71bf2..0938111885 100644
--- a/internal/management/controller/publication_controller_sql.go
+++ b/internal/management/controller/publication_controller_sql.go
@@ -28,7 +28,7 @@ import (
)
func (r *PublicationReconciler) alignPublication(ctx context.Context, obj *apiv1.Publication) error {
- db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName)
+ db, err := r.getDB(obj.Spec.DBName)
if err != nil {
return fmt.Errorf("while getting DB connection: %w", err)
}
diff --git a/internal/management/controller/publication_controller_test.go b/internal/management/controller/publication_controller_test.go
new file mode 100644
index 0000000000..ea77ba6002
--- /dev/null
+++ b/internal/management/controller/publication_controller_test.go
@@ -0,0 +1,368 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/jackc/pgx/v5"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+const publicationDetectionQuery = `SELECT count(*)
+ FROM pg_publication
+ WHERE pubname = $1`
+
+var _ = Describe("Managed publication controller tests", func() {
+ var (
+ dbMock sqlmock.Sqlmock
+ db *sql.DB
+ publication *apiv1.Publication
+ cluster *apiv1.Cluster
+ r *PublicationReconciler
+ fakeClient client.Client
+ err error
+ )
+
+ BeforeEach(func() {
+ cluster = &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster-example",
+ Namespace: "default",
+ },
+ Status: apiv1.ClusterStatus{
+ CurrentPrimary: "cluster-example-1",
+ TargetPrimary: "cluster-example-1",
+ },
+ }
+ publication = &apiv1.Publication{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pub-one",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.PublicationSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ ReclaimPolicy: apiv1.PublicationReclaimDelete,
+ Name: "pub-all",
+ DBName: "app",
+ Target: apiv1.PublicationTarget{
+ AllTables: true,
+ Objects: []apiv1.PublicationTargetObject{
+ {TablesInSchema: "public"},
+ },
+ },
+ },
+ }
+ db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+
+ pgInstance := postgres.NewInstance().
+ WithNamespace("default").
+ WithPodName("cluster-example-1").
+ WithClusterName("cluster-example")
+
+ fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithObjects(cluster, publication).
+ WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Publication{}).
+ Build()
+
+ r = &PublicationReconciler{
+ Client: fakeClient,
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ instance: pgInstance,
+ getDB: func(_ string) (*sql.DB, error) {
+ return db, nil
+ },
+ }
+ r.finalizerReconciler = newFinalizerReconciler(
+ fakeClient,
+ utils.PublicationFinalizerName,
+ r.evaluateDropPublication,
+ )
+ })
+
+ AfterEach(func() {
+ Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+ })
+
+ It("adds finalizer and sets status ready on success", func(ctx SpecContext) {
+ noHits := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name).
+ WillReturnRows(noHits)
+
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE PUBLICATION %s FOR ALL TABLES",
+ pgx.Identifier{publication.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ err := reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(publication.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(publication.GetStatusMessage()).Should(BeEmpty())
+ Expect(publication.GetFinalizers()).NotTo(BeEmpty())
+ })
+
+ It("publication object inherits error after patching", func(ctx SpecContext) {
+ expectedError := fmt.Errorf("no permission")
+ oneHit := sqlmock.NewRows([]string{""}).AddRow("1")
+ dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name).
+ WillReturnRows(oneHit)
+
+ expectedQuery := fmt.Sprintf("ALTER PUBLICATION %s SET TABLES IN SCHEMA \"public\"",
+ pgx.Identifier{publication.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError)
+
+ err := reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(publication.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(publication.Status.Message).Should(ContainSubstring(expectedError.Error()))
+ })
+
+ When("reclaim policy is delete", func() {
+ It("on deletion it removes finalizers and drops the Publication", func(ctx SpecContext) {
+ // Mocking Detect publication
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking Create publication
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE PUBLICATION %s FOR ALL TABLES",
+ pgx.Identifier{publication.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ // Mocking Drop Publication
+ expectedDrop := fmt.Sprintf("DROP PUBLICATION IF EXISTS %s",
+ pgx.Identifier{publication.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1))
+
+ err := reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(publication.GetFinalizers()).NotTo(BeEmpty())
+ Expect(publication.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(publication.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ publication.SetGeneration(publication.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+ // We now look at the behavior when we delete the Database object
+ Expect(fakeClient.Delete(ctx, publication)).To(Succeed())
+
+ err = reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
+
+ When("reclaim policy is retain", func() {
+ It("on deletion it removes finalizers and does NOT drop the Publication", func(ctx SpecContext) {
+ publication.Spec.ReclaimPolicy = apiv1.PublicationReclaimRetain
+ Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+ // Mocking Detect publication
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking Create publication
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE PUBLICATION %s FOR ALL TABLES",
+ pgx.Identifier{publication.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ err := reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(publication.GetFinalizers()).NotTo(BeEmpty())
+ Expect(publication.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(publication.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ publication.SetGeneration(publication.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+ // We now look at the behavior when we delete the Database object
+ Expect(fakeClient.Delete(ctx, publication)).To(Succeed())
+
+ err = reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
+
+ It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
+ // Since the fakeClient has the `cluster-example` cluster, let's reference
+ // another cluster `cluster-other` that is not found by the fakeClient
+ pgInstance := postgres.NewInstance().
+ WithNamespace("default").
+ WithPodName("cluster-other-1").
+ WithClusterName("cluster-other")
+
+ r = &PublicationReconciler{
+ Client: fakeClient,
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ instance: pgInstance,
+ getDB: func(_ string) (*sql.DB, error) {
+ return db, nil
+ },
+ }
+
+ // Updating the publication object to reference the newly created Cluster
+ publication.Spec.ClusterRef.Name = "cluster-other"
+ Expect(fakeClient.Update(ctx, publication)).To(Succeed())
+
+ err := reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(publication.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(publication.GetStatusMessage()).Should(ContainSubstring(
+ fmt.Sprintf("%q not found", publication.Spec.ClusterRef.Name)))
+ })
+
+ It("skips reconciliation if publication object isn't found (deleted publication)", func(ctx SpecContext) {
+ // Initialize a new Publication but without creating it in the K8S Cluster
+ otherPublication := &apiv1.Publication{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pub-other",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.PublicationSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ Name: "pub-all",
+ },
+ }
+
+ // Reconcile the publication that hasn't been created in the K8S Cluster
+ result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: otherPublication.Namespace,
+ Name: otherPublication.Name,
+ }})
+
+ // Expect the reconciler to exit silently, since the object doesn't exist
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).Should(BeZero())
+ })
+
+ It("marks as failed if the target publication is already being managed", func(ctx SpecContext) {
+ // Let's force the publication to have a past reconciliation
+ publication.Status.ObservedGeneration = 2
+ Expect(fakeClient.Status().Update(ctx, publication)).To(Succeed())
+
+ // A new Publication object targeting the same "pub-all"
+ pubDuplicate := &apiv1.Publication{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pub-duplicate",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.PublicationSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ Name: "pub-all",
+ },
+ }
+
+ Expect(fakeClient.Create(ctx, pubDuplicate)).To(Succeed())
+
+ err := reconcilePublication(ctx, fakeClient, r, pubDuplicate)
+ Expect(err).ToNot(HaveOccurred())
+
+ expectedError := fmt.Sprintf("%q is already managed by object %q",
+ pubDuplicate.Spec.Name, publication.Name)
+ Expect(pubDuplicate.Status.Applied).To(HaveValue(BeFalse()))
+ Expect(pubDuplicate.Status.Message).To(ContainSubstring(expectedError))
+ Expect(pubDuplicate.Status.ObservedGeneration).To(BeZero())
+ })
+
+ It("properly signals a publication is on a replica cluster", func(ctx SpecContext) {
+ initialCluster := cluster.DeepCopy()
+ cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{
+ Enabled: ptr.To(true),
+ }
+ Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed())
+
+ err := reconcilePublication(ctx, fakeClient, r, publication)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(publication.Status.Applied).Should(BeNil())
+ Expect(publication.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
+ })
+})
+
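+// reconcilePublication runs a single reconcile cycle for the given
+// publication and then refreshes the object from the fake client. The
+// returned error is the Get error, which lets callers assert NotFound
+// once the publication has been deleted.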
+func reconcilePublication(
+ ctx context.Context,
+ fakeClient client.Client,
+ r *PublicationReconciler,
+ publication *apiv1.Publication,
+) error {
+ GinkgoT().Helper()
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: publication.GetNamespace(),
+ Name: publication.GetName(),
+ }})
+ Expect(err).ToNot(HaveOccurred())
+ return fakeClient.Get(ctx, client.ObjectKey{
+ Namespace: publication.GetNamespace(),
+ Name: publication.GetName(),
+ }, publication)
+}
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go
index 5fae540722..4f8d5c7583 100644
--- a/internal/management/controller/subscription_controller.go
+++ b/internal/management/controller/subscription_controller.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+ "database/sql"
"fmt"
"time"
@@ -40,6 +41,7 @@ type SubscriptionReconciler struct {
instance *postgres.Instance
finalizerReconciler *finalizerReconciler[*apiv1.Subscription]
+ getDB func(name string) (*sql.DB, error)
}
// subscriptionReconciliationInterval is the time between the
@@ -167,7 +169,7 @@ func (r *SubscriptionReconciler) evaluateDropSubscription(ctx context.Context, s
return nil
}
- db, err := r.instance.ConnectionPool().Connection(sub.Spec.DBName)
+ db, err := r.getDB(sub.Spec.DBName)
if err != nil {
return fmt.Errorf("while getting DB connection: %w", err)
}
@@ -179,7 +181,13 @@ func NewSubscriptionReconciler(
mgr manager.Manager,
instance *postgres.Instance,
) *SubscriptionReconciler {
- sr := &SubscriptionReconciler{Client: mgr.GetClient(), instance: instance}
+ sr := &SubscriptionReconciler{
+ Client: mgr.GetClient(),
+ instance: instance,
+ getDB: func(name string) (*sql.DB, error) {
+ return instance.ConnectionPool().Connection(name)
+ },
+ }
sr.finalizerReconciler = newFinalizerReconciler(
mgr.GetClient(),
utils.SubscriptionFinalizerName,
diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go
index 47f9f945df..fcb61bc3ab 100644
--- a/internal/management/controller/subscription_controller_sql.go
+++ b/internal/management/controller/subscription_controller_sql.go
@@ -32,7 +32,7 @@ func (r *SubscriptionReconciler) alignSubscription(
obj *apiv1.Subscription,
connString string,
) error {
- db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName)
+ db, err := r.getDB(obj.Spec.DBName)
if err != nil {
return fmt.Errorf("while getting DB connection: %w", err)
}
diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go
new file mode 100644
index 0000000000..f6afdc0c4e
--- /dev/null
+++ b/internal/management/controller/subscription_controller_test.go
@@ -0,0 +1,397 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/jackc/pgx/v5"
+ "github.com/lib/pq"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
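+// subscriptionDetectionQuery mirrors the query the reconciler runs to
+// detect an existing subscription; sqlmock is configured with
+// QueryMatcherEqual, so expectations must match it verbatim.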
+const subscriptionDetectionQuery = `SELECT count(*)
+ FROM pg_subscription
+ WHERE subname = $1`
+
+var _ = Describe("Managed subscription controller tests", func() {
+ var (
+ dbMock sqlmock.Sqlmock
+ db *sql.DB
+ subscription *apiv1.Subscription
+ cluster *apiv1.Cluster
+ r *SubscriptionReconciler
+ fakeClient client.Client
+ connString string
+ err error
+ )
+
+ BeforeEach(func() {
+ cluster = &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster-example",
+ Namespace: "default",
+ },
+ Status: apiv1.ClusterStatus{
+ CurrentPrimary: "cluster-example-1",
+ TargetPrimary: "cluster-example-1",
+ },
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: apiv1.ExternalClusterList{
+ apiv1.ExternalCluster{
+ Name: "cluster-other",
+ ConnectionParameters: map[string]string{
+ "host": "localhost",
+ },
+ },
+ },
+ },
+ }
+ subscription = &apiv1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sub-one",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.SubscriptionSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ ReclaimPolicy: apiv1.SubscriptionReclaimDelete,
+ Name: "sub-one",
+ DBName: "app",
+ PublicationName: "pub-all",
+ PublicationDBName: "app",
+ ExternalClusterName: "cluster-other",
+ },
+ }
+ connString, err = getSubscriptionConnectionString(cluster, "cluster-other", "app")
+ Expect(err).ToNot(HaveOccurred())
+
+ db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+
+ pgInstance := postgres.NewInstance().
+ WithNamespace("default").
+ WithPodName("cluster-example-1").
+ WithClusterName("cluster-example")
+
+ fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithObjects(cluster, subscription).
+ WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Subscription{}).
+ Build()
+
+ r = &SubscriptionReconciler{
+ Client: fakeClient,
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ instance: pgInstance,
+ getDB: func(_ string) (*sql.DB, error) {
+ return db, nil
+ },
+ }
+ r.finalizerReconciler = newFinalizerReconciler(
+ fakeClient,
+ utils.SubscriptionFinalizerName,
+ r.evaluateDropSubscription,
+ )
+ })
+
+ AfterEach(func() {
+ Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+ })
+
+ It("adds finalizer and sets status ready on success", func(ctx SpecContext) {
+ noHits := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name).
+ WillReturnRows(noHits)
+
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s",
+ pgx.Identifier{subscription.Spec.Name}.Sanitize(),
+ pq.QuoteLiteral(connString),
+ pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: subscription.GetNamespace(),
+ Name: subscription.GetName(),
+ }})
+ Expect(err).ToNot(HaveOccurred())
+ err = fakeClient.Get(ctx, client.ObjectKey{
+ Namespace: subscription.GetNamespace(),
+ Name: subscription.GetName(),
+ }, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(subscription.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(subscription.GetStatusMessage()).Should(BeEmpty())
+ Expect(subscription.GetFinalizers()).NotTo(BeEmpty())
+ })
+
+ It("subscription object inherits error after patching", func(ctx SpecContext) {
+ expectedError := fmt.Errorf("no permission")
+ oneHit := sqlmock.NewRows([]string{""}).AddRow("1")
+ dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name).
+ WillReturnRows(oneHit)
+
+ expectedQuery := fmt.Sprintf("ALTER SUBSCRIPTION %s SET PUBLICATION %s",
+ pgx.Identifier{subscription.Spec.Name}.Sanitize(),
+ pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError)
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(subscription.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(subscription.Status.Message).Should(ContainSubstring(expectedError.Error()))
+ })
+
+ When("reclaim policy is delete", func() {
+ It("on deletion it removes finalizers and drops the subscription", func(ctx SpecContext) {
+ // Mocking detection of subscriptions
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking subscription creation
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s",
+ pgx.Identifier{subscription.Spec.Name}.Sanitize(),
+ pq.QuoteLiteral(connString),
+ pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ // Mocking subscription drop
+ expectedDrop := fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s",
+ pgx.Identifier{subscription.Spec.Name}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1))
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(subscription.GetFinalizers()).NotTo(BeEmpty())
+ Expect(subscription.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(subscription.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ subscription.SetGeneration(subscription.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ // We now look at the behavior when we delete the Subscription object
+ Expect(fakeClient.Delete(ctx, subscription)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
+
+ When("reclaim policy is retain", func() {
+ It("on deletion it removes finalizers and does NOT drop the subscription", func(ctx SpecContext) {
+ subscription.Spec.ReclaimPolicy = apiv1.SubscriptionReclaimRetain
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ // Mocking subscription detection
+ expectedValue := sqlmock.NewRows([]string{""}).AddRow("0")
+ dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name).
+ WillReturnRows(expectedValue)
+
+ // Mocking subscription creation
+ expectedCreate := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s",
+ pgx.Identifier{subscription.Spec.Name}.Sanitize(),
+ pq.QuoteLiteral(connString),
+ pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate)
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Plain successful reconciliation, finalizers have been created
+ Expect(subscription.GetFinalizers()).NotTo(BeEmpty())
+ Expect(subscription.Status.Applied).Should(HaveValue(BeTrue()))
+ Expect(subscription.Status.Message).Should(BeEmpty())
+
+ // The next 2 lines are a hacky bit to make sure the next reconciler
+ // call doesn't skip on account of Generation == ObservedGeneration.
+ // See fake.Client known issues with `Generation`
+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder
+ subscription.SetGeneration(subscription.GetGeneration() + 1)
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ // We now look at the behavior when we delete the Subscription object
+ Expect(fakeClient.Delete(ctx, subscription)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).To(HaveOccurred())
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
+ })
+ })
+
+ It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) {
+ // Since the fakeClient has the `cluster-example` cluster, let's reference
+ // another cluster `cluster-other` that is not found by the fakeClient
+ pgInstance := postgres.NewInstance().
+ WithNamespace("default").
+ WithPodName("cluster-other-1").
+ WithClusterName("cluster-other")
+
+ r = &SubscriptionReconciler{
+ Client: fakeClient,
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ instance: pgInstance,
+ getDB: func(_ string) (*sql.DB, error) {
+ return db, nil
+ },
+ }
+
+ // Update the subscription object to reference the "cluster-other" Cluster,
+ // which does not exist in the fake client
+ subscription.Spec.ClusterRef.Name = "cluster-other"
+ Expect(fakeClient.Update(ctx, subscription)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(subscription.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(subscription.Status.Message).Should(ContainSubstring(
+ fmt.Sprintf("%q not found", subscription.Spec.ClusterRef.Name)))
+ })
+
+ It("skips reconciliation if subscription object isn't found (deleted subscription)", func(ctx SpecContext) {
+ // Initialize a new subscription but without creating it in the K8S Cluster
+ otherSubscription := &apiv1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sub-other",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.SubscriptionSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ Name: "sub-one",
+ },
+ }
+
+ // Reconcile the subscription that hasn't been created in the K8S Cluster
+ result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: otherSubscription.Namespace,
+ Name: otherSubscription.Name,
+ }})
+
+ // Expect the reconciler to exit silently, since the object doesn't exist
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).Should(BeZero()) // nothing to do, since the subscription object does not exist
+ })
+
+ It("marks as failed if the target subscription is already being managed", func(ctx SpecContext) {
+ // Let's force the subscription to have a past reconciliation
+ subscription.Status.ObservedGeneration = 2
+ Expect(fakeClient.Status().Update(ctx, subscription)).To(Succeed())
+
+ // A new Subscription object targeting the same "sub-one"
+ subDuplicate := &apiv1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sub-duplicate",
+ Namespace: "default",
+ Generation: 1,
+ },
+ Spec: apiv1.SubscriptionSpec{
+ ClusterRef: corev1.LocalObjectReference{
+ Name: cluster.Name,
+ },
+ Name: "sub-one",
+ PublicationName: "pub-all",
+ PublicationDBName: "app",
+ ExternalClusterName: "cluster-other",
+ },
+ }
+
+ Expect(fakeClient.Create(ctx, subDuplicate)).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subDuplicate)
+ Expect(err).ToNot(HaveOccurred())
+
+ expectedError := fmt.Sprintf("%q is already managed by object %q",
+ subDuplicate.Spec.Name, subscription.Name)
+ Expect(subDuplicate.Status.Applied).Should(HaveValue(BeFalse()))
+ Expect(subDuplicate.Status.Message).Should(ContainSubstring(expectedError))
+ })
+
+ It("properly signals a subscription is on a replica cluster", func(ctx SpecContext) {
+ initialCluster := cluster.DeepCopy()
+ cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{
+ Enabled: ptr.To(true),
+ }
+ Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed())
+
+ err = reconcileSubscription(ctx, fakeClient, r, subscription)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(subscription.Status.Applied).Should(BeNil())
+ Expect(subscription.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
+ })
+})
+
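+// reconcileSubscription runs a single reconcile cycle for the given
+// subscription and then refreshes the object from the fake client. The
+// returned error is the Get error, which lets callers assert NotFound
+// once the subscription has been deleted.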
+func reconcileSubscription(
+ ctx context.Context,
+ fakeClient client.Client,
+ r *SubscriptionReconciler,
+ subscription *apiv1.Subscription,
+) error {
+ GinkgoT().Helper()
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+ Namespace: subscription.GetNamespace(),
+ Name: subscription.GetName(),
+ }})
+ Expect(err).ToNot(HaveOccurred())
+ return fakeClient.Get(ctx, client.ObjectKey{
+ Namespace: subscription.GetNamespace(),
+ Name: subscription.GetName(),
+ }, subscription)
+}
From 3e82045b6a5dd00445c2abd62c703b938a7b9710 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 17 Dec 2024 11:42:22 +0100
Subject: [PATCH 234/836] fix(deps): update kubernetes patches (main) (#6236)
https://github.com/kubernetes/utils `6fe5fd8` -> `24370be`
https://github.com/kubernetes-sigs/controller-runtime `v0.19.2` -> `v0.19.3`
---
go.mod | 5 ++---
go.sum | 10 ++++------
2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/go.mod b/go.mod
index 980261fede..b325745c1b 100644
--- a/go.mod
+++ b/go.mod
@@ -45,8 +45,8 @@ require (
k8s.io/apimachinery v0.31.3
k8s.io/cli-runtime v0.31.3
k8s.io/client-go v0.31.3
- k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078
- sigs.k8s.io/controller-runtime v0.19.2
+ k8s.io/utils v0.0.0-20241210054802-24370beab758
+ sigs.k8s.io/controller-runtime v0.19.3
sigs.k8s.io/yaml v1.4.0
)
@@ -57,7 +57,6 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/fatih/color v1.17.0 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
diff --git a/go.sum b/go.sum
index 2140149042..721058d0b3 100644
--- a/go.sum
+++ b/go.sum
@@ -39,8 +39,6 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
@@ -292,10 +290,10 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo=
k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
-k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno=
-k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8=
-sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
+sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU=
From 5fd41abeebf614e3217e73318a9ba95ee1e8ccf3 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 17 Dec 2024 17:46:43 +0100
Subject: [PATCH 235/836] fix(deps): update all non-major go dependencies
(main) (#6333)
https://github.com/goreleaser/goreleaser `v2.4.8` -> `v2.5.0`
https://github.com/grpc-ecosystem/go-grpc-middleware `v2.1.0` -> `v2.2.0`
https://github.com/kubernetes-csi/external-snapshotter `v8.0.0` -> `v8.2.0`
https://github.com/onsi/gomega `v1.36.0` -> `v1.36.1`
golang.org/x/term `v0.26.0` -> `v0.27.0`
https://github.com/grpc/grpc-go `v1.68.0` -> `v1.69.0`
---
Makefile | 2 +-
go.mod | 14 +++++++-------
go.sum | 40 ++++++++++++++++++++++++++--------------
3 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/Makefile b/Makefile
index 6a80924a5a..c4f7a65e3d 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
-GORELEASER_VERSION ?= v2.4.8
+GORELEASER_VERSION ?= v2.5.0
SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.38.0
diff --git a/go.mod b/go.mod
index b325745c1b..4688667712 100644
--- a/go.mod
+++ b/go.mod
@@ -17,16 +17,16 @@ require (
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
- github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0
+ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0
github.com/jackc/pgx/v5 v5.7.1
github.com/jackc/puddle/v2 v2.2.2
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
- github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0
+ github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0
github.com/lib/pq v1.10.9
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
github.com/onsi/ginkgo/v2 v2.22.0
- github.com/onsi/gomega v1.36.0
+ github.com/onsi/gomega v1.36.1
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
@@ -37,8 +37,8 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/term v0.26.0
- google.golang.org/grpc v1.68.0
+ golang.org/x/term v0.27.0
+ google.golang.org/grpc v1.69.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.3
k8s.io/apiextensions-apiserver v0.31.3
@@ -107,12 +107,12 @@ require (
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.27.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 721058d0b3..d7509f0515 100644
--- a/go.sum
+++ b/go.sum
@@ -45,6 +45,8 @@ github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8b
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
@@ -81,8 +83,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -110,8 +112,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw=
-github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
+github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 h1:Q3jQ1NkFqv5o+F8dMmHd8SfEmlcwNeo1immFApntEwE=
+github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
@@ -146,8 +148,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
-github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -196,6 +198,16 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.starlark.net v0.0.0-20240925182052-1207426daebd h1:S+EMisJOHklQxnS3kqsY8jl2y5aF0FDEdcLnOw3q22E=
go.starlark.net v0.0.0-20240925182052-1207426daebd/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -234,10 +246,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
-golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
-golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
@@ -256,10 +268,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
-google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
+google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
From d0964d50ba0c473258e6bba9e6ac61252ca75582 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 17 Dec 2024 18:19:25 +0100
Subject: [PATCH 236/836] chore(deps): update
agilepathway/pull-request-label-checker docker tag to v1.6.60 (main) (#6339)
---
.github/workflows/require-labels.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml
index e0da0f8728..1bbbfb1d23 100644
--- a/.github/workflows/require-labels.yml
+++ b/.github/workflows/require-labels.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-24.04
steps:
- name: Require labels
- uses: docker://agilepathway/pull-request-label-checker:v1.6.56
+ uses: docker://agilepathway/pull-request-label-checker:v1.6.60
with:
any_of: "ok to merge :ok_hand:"
none_of: "do not merge"
From aed3c1398b6e25d1e6c8135c26e2ddd0066a28d4 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 18 Dec 2024 14:16:33 +0100
Subject: [PATCH 237/836] fix(deps): update
github.com/cloudnative-pg/barman-cloud digest to 134c7de (main) (#6331)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 4688667712..bf663e9fe4 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
github.com/avast/retry-go/v4 v4.6.0
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
- github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258
+ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
diff --git a/go.sum b/go.sum
index d7509f0515..7a990938f9 100644
--- a/go.sum
+++ b/go.sum
@@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54=
github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
-github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258 h1:B/Wncxl/OXrXJUHHtBCyxE//6FdIxznERfzPMsNHWfw=
-github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
+github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU=
+github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs=
From e4913b240729e3e703bd915643cfc4e5224701e6 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 18 Dec 2024 14:55:28 +0100
Subject: [PATCH 238/836] chore(deps): update kubernetes csi (main) (#6343)
https://github.com/kubernetes-csi/external-snapshotter `v8.1.0` -> `v8.2.0`
https://github.com/rook/rook `v1.15.6` -> `v1.16.0`
---
.github/workflows/continuous-delivery.yml | 4 ++--
hack/setup-cluster.sh | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 04c3cd31ef..9d809c8802 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,8 +37,8 @@ env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.25.0"
- ROOK_VERSION: "v1.15.6"
- EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0"
+ ROOK_VERSION: "v1.16.0"
+ EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
BUILD_PUSH_CACHE_FROM: ""
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index 698baa2e3f..a0166bca71 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -27,7 +27,7 @@ fi
KIND_NODE_DEFAULT_VERSION=v1.31.2
K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
-EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0
+EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
EXTERNAL_PROVISIONER_VERSION=v5.1.0
EXTERNAL_RESIZER_VERSION=v1.12.0
EXTERNAL_ATTACHER_VERSION=v4.7.0
From e22e7e9fdf7ee3e63d425a428b538fe76b8bbe22 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Wed, 18 Dec 2024 15:27:19 +0100
Subject: [PATCH 239/836] fix: key collision in structured logs (#6324)
The logs used the generic key `name` for several different values; we now
assign distinct keys such as `jobName` and `podName`.
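For context, a minimal, self-contained sketch of the collision (illustrative
only: it uses the standard library's log/slog instead of the operator's
logr-based logger, and the values are made up):

    package main

    import (
    	"log/slog"
    	"os"
    )

    func main() {
    	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

    	// Before: two different kinds of values share the generic "name" key,
    	// so consumers cannot tell a job name from a pod name by key alone.
    	logger.Info("Creating new Job", "name", "cluster-example-1-initdb")
    	logger.Info("Cannot extract Pod status", "name", "cluster-example-1")

    	// After: distinct keys make the records unambiguous when filtering.
    	logger.Info("Creating new Job", "jobName", "cluster-example-1-initdb")
    	logger.Info("Cannot extract Pod status", "podName", "cluster-example-1")
    }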
Closes #6321
Signed-off-by: Jaime Silvela
---
internal/controller/cluster_create.go | 2 +-
pkg/management/postgres/webserver/client/remote/instance.go | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go
index 280fe3361d..39029dd44f 100644
--- a/internal/controller/cluster_create.go
+++ b/internal/controller/cluster_create.go
@@ -1166,7 +1166,7 @@ func (r *ClusterReconciler) createPrimaryInstance(
}
contextLogger.Info("Creating new Job",
- "name", job.Name,
+ "jobName", job.Name,
"primary", true)
utils.SetOperatorVersion(&job.ObjectMeta, versions.Version)
diff --git a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go
index b83111f850..6c158402fe 100644
--- a/pkg/management/postgres/webserver/client/remote/instance.go
+++ b/pkg/management/postgres/webserver/client/remote/instance.go
@@ -177,7 +177,7 @@ func (r *instanceClientImpl) GetStatusFromInstances(
for idx := range status.Items {
if status.Items[idx].Error != nil {
log.FromContext(ctx).Info("Cannot extract Pod status",
- "name", status.Items[idx].Pod.Name,
+ "podName", status.Items[idx].Pod.Name,
"error", status.Items[idx].Error.Error())
}
}
From 67cc5473f3ca30f47d12860a01d57d29b2c47039 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 18 Dec 2024 17:47:20 +0100
Subject: [PATCH 240/836] fix: use optimistic locking when updating conditions
(#6328)
Kubernetes does not implement strategic merge for CRDs, so every JSON
merge patch replaces the whole condition set with a new one.
A proposed patch may start from a Cluster that is not up-to-date; in that
case, the conditions are reverted to an older status.
This patch fixes the race condition by wrapping the operation in a merge
patch that requires optimistic locking, retrying the conditions update
when needed.
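A rough sketch of the pattern (the real helper is
status.PatchConditionsWithOptimisticLock in pkg/resources/status; this
simplified, self-contained version is illustrative, not the actual
implementation):

    package sketch

    import (
    	"context"

    	"k8s.io/apimachinery/pkg/api/meta"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/util/retry"
    	"sigs.k8s.io/controller-runtime/pkg/client"

    	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    )

    func patchConditionsWithOptimisticLock(
    	ctx context.Context,
    	c client.Client,
    	cluster *apiv1.Cluster,
    	conditions ...metav1.Condition,
    ) error {
    	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
    		// Re-read the latest Cluster so the patch never starts from a
    		// stale condition set.
    		var current apiv1.Cluster
    		if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &current); err != nil {
    			return err
    		}

    		updated := current.DeepCopy()
    		changed := false
    		for _, condition := range conditions {
    			changed = meta.SetStatusCondition(&updated.Status.Conditions, condition) || changed
    		}
    		if !changed {
    			return nil
    		}

    		// The optimistic-lock option embeds resourceVersion in the merge
    		// patch, so a concurrent writer causes a conflict (and a retry)
    		// instead of silently reverting the conditions.
    		return c.Status().Patch(ctx, updated,
    			client.MergeFromWithOptions(&current, client.MergeFromWithOptimisticLock{}))
    	})
    }

Checking for a change before patching avoids needless API round trips when
the condition is already in the desired state.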
Fixes: #6317
Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Co-authored-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
---
api/v1/cluster_conditions.go | 8 +-
internal/controller/backup_controller.go | 30 +++++--
pkg/conditions/conditions.go | 49 -----------
pkg/conditions/doc.go | 19 -----
pkg/management/postgres/backup.go | 24 +++---
pkg/management/postgres/backup_test.go | 1 +
pkg/management/postgres/webserver/local.go | 15 ++--
.../postgres/webserver/plugin_backup.go | 21 ++---
.../replicaclusterswitch/reconciler.go | 79 ++++++++++--------
pkg/resources/status/conditions.go | 81 +++++++++++++++++++
pkg/resources/status/phase.go | 77 +++++++++---------
pkg/resources/status/update.go | 73 +++++++++++++++++
12 files changed, 305 insertions(+), 172 deletions(-)
delete mode 100644 pkg/conditions/conditions.go
delete mode 100644 pkg/conditions/doc.go
create mode 100644 pkg/resources/status/conditions.go
create mode 100644 pkg/resources/status/update.go
diff --git a/api/v1/cluster_conditions.go b/api/v1/cluster_conditions.go
index 9d1e83947a..ae9844632d 100644
--- a/api/v1/cluster_conditions.go
+++ b/api/v1/cluster_conditions.go
@@ -22,7 +22,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
var (
// BackupSucceededCondition is added to a backup
// when it was completed correctly
- BackupSucceededCondition = &metav1.Condition{
+ BackupSucceededCondition = metav1.Condition{
Type: string(ConditionBackup),
Status: metav1.ConditionTrue,
Reason: string(ConditionReasonLastBackupSucceeded),
@@ -31,7 +31,7 @@ var (
// BackupStartingCondition is added to a backup
// when it started
- BackupStartingCondition = &metav1.Condition{
+ BackupStartingCondition = metav1.Condition{
Type: string(ConditionBackup),
Status: metav1.ConditionFalse,
Reason: string(ConditionBackupStarted),
@@ -40,8 +40,8 @@ var (
// BuildClusterBackupFailedCondition builds
// ConditionReasonLastBackupFailed condition
- BuildClusterBackupFailedCondition = func(err error) *metav1.Condition {
- return &metav1.Condition{
+ BuildClusterBackupFailedCondition = func(err error) metav1.Condition {
+ return metav1.Condition{
Type: string(ConditionBackup),
Status: metav1.ConditionFalse,
Reason: string(ConditionReasonLastBackupFailed),
diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go
index 4506c08737..29c6aea6f9 100644
--- a/internal/controller/backup_controller.go
+++ b/internal/controller/backup_controller.go
@@ -45,11 +45,11 @@ import (
cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/backup/volumesnapshot"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
+ resourcestatus "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -417,7 +417,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
}
}
- if errCond := conditions.Patch(ctx, r.Client, cluster, apiv1.BackupStartingCondition); errCond != nil {
+ if errCond := resourcestatus.PatchConditionsWithOptimisticLock(
+ ctx,
+ r.Client,
+ cluster,
+ apiv1.BackupStartingCondition,
+ ); errCond != nil {
contextLogger.Error(errCond, "Error while updating backup condition (backup starting)")
}
@@ -440,7 +445,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
// and un-fence the Pod
contextLogger.Error(err, "while executing snapshot backup")
// Update backup status in cluster conditions
- if errCond := conditions.Patch(ctx, r.Client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil {
+ if errCond := resourcestatus.PatchConditionsWithOptimisticLock(
+ ctx,
+ r.Client,
+ cluster,
+ apiv1.BuildClusterBackupFailedCondition(err),
+ ); errCond != nil {
contextLogger.Error(errCond, "Error while updating backup condition (backup snapshot failed)")
}
@@ -453,7 +463,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
return res, nil
}
- if err := conditions.Patch(ctx, r.Client, cluster, apiv1.BackupSucceededCondition); err != nil {
+ if err := resourcestatus.PatchConditionsWithOptimisticLock(
+ ctx,
+ r.Client,
+ cluster,
+ apiv1.BackupSucceededCondition,
+ ); err != nil {
contextLogger.Error(err, "Can't update the cluster with the completed snapshot backup data")
}
@@ -633,7 +648,12 @@ func startInstanceManagerBackup(
status.CommandError = stdout
// Update backup status in cluster conditions
- if errCond := conditions.Patch(ctx, client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil {
+ if errCond := resourcestatus.PatchConditionsWithOptimisticLock(
+ ctx,
+ client,
+ cluster,
+ apiv1.BuildClusterBackupFailedCondition(err),
+ ); errCond != nil {
log.FromContext(ctx).Error(errCond, "Error while updating backup condition (backup failed)")
}
return postgres.PatchBackupStatusAndRetry(ctx, client, backup)
diff --git a/pkg/conditions/conditions.go b/pkg/conditions/conditions.go
deleted file mode 100644
index 768ac02d49..0000000000
--- a/pkg/conditions/conditions.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package conditions
-
-import (
- "context"
-
- "k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-)
-
-// Patch will patch a particular condition in cluster status.
-func Patch(
- ctx context.Context,
- c client.Client,
- cluster *apiv1.Cluster,
- condition *metav1.Condition,
-) error {
- if cluster == nil || condition == nil {
- return nil
- }
-
- existingCluster := cluster.DeepCopy()
- if changed := meta.SetStatusCondition(&cluster.Status.Conditions, *condition); changed {
- // To avoid conflict using patch instead of update
- if err := c.Status().Patch(ctx, cluster, client.MergeFrom(existingCluster)); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/pkg/conditions/doc.go b/pkg/conditions/doc.go
deleted file mode 100644
index acecc6fc10..0000000000
--- a/pkg/conditions/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package conditions contains functions useful to update the conditions
-// on the resources managed by the operator
-package conditions
diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go
index 46ff344b29..f79c4d4a9a 100644
--- a/pkg/management/postgres/backup.go
+++ b/pkg/management/postgres/backup.go
@@ -32,6 +32,7 @@ import (
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time"
+ "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -41,9 +42,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
// this is needed to correctly open the sql connection with the pgx driver
_ "github.com/jackc/pgx/v5/stdlib"
@@ -187,12 +188,15 @@ func (b *BackupCommand) run(ctx context.Context) {
// add backup failed condition to the cluster
if failErr := b.retryWithRefreshedCluster(ctx, func() error {
- origCluster := b.Cluster.DeepCopy()
-
- meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(err))
-
- b.Cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339)
- return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster))
+ return status.PatchWithOptimisticLock(
+ ctx,
+ b.Client,
+ b.Cluster,
+ func(cluster *apiv1.Cluster) {
+ meta.SetStatusCondition(&cluster.Status.Conditions, apiv1.BuildClusterBackupFailedCondition(err))
+ cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339)
+ },
+ )
}); failErr != nil {
b.Log.Error(failErr, "while setting cluster condition for failed backup")
// We do not terminate here because it's more important to properly handle
@@ -210,7 +214,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error {
// Update backup status in cluster conditions on startup
if err := b.retryWithRefreshedCluster(ctx, func() error {
- return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
+ return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
}); err != nil {
b.Log.Error(err, "Error changing backup condition (backup started)")
// We do not terminate here because we could still have a good backup
@@ -256,7 +260,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error {
// Update backup status in cluster conditions on backup completion
if err := b.retryWithRefreshedCluster(ctx, func() error {
- return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
+ return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
}); err != nil {
b.Log.Error(err, "Can't update the cluster with the completed backup data")
}
@@ -303,7 +307,7 @@ func (b *BackupCommand) backupMaintenance(ctx context.Context) {
data.GetLastSuccessfulBackupTime(),
)
- if reflect.DeepEqual(origCluster.Status, b.Cluster.Status) {
+ if equality.Semantic.DeepEqual(origCluster.Status, b.Cluster.Status) {
return nil
}
return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster))
diff --git a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go
index 18307bc791..8ff4796c72 100644
--- a/pkg/management/postgres/backup_test.go
+++ b/pkg/management/postgres/backup_test.go
@@ -125,6 +125,7 @@ var _ = Describe("testing backup command", func() {
Client: fake.NewClientBuilder().
WithScheme(scheme.BuildWithAllKnownScheme()).
WithObjects(cluster, backup).
+ WithStatusSubresource(cluster, backup).
Build(),
Recorder: &record.FakeRecorder{},
Env: os.Environ(),
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index 0d15f851ca..27b15db9e1 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -33,9 +33,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -245,9 +245,9 @@ type ArchiveStatusRequest struct {
Error string `json:"error,omitempty"`
}
-func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() *metav1.Condition {
+func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() metav1.Condition {
if asr.Error != "" {
- return &metav1.Condition{
+ return metav1.Condition{
Type: string(apiv1.ConditionContinuousArchiving),
Status: metav1.ConditionFalse,
Reason: string(apiv1.ConditionReasonContinuousArchivingFailing),
@@ -255,7 +255,7 @@ func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() *metav1.Condi
}
}
- return &metav1.Condition{
+ return metav1.Condition{
Type: string(apiv1.ConditionContinuousArchiving),
Status: metav1.ConditionTrue,
Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess),
@@ -283,7 +283,12 @@ func (ws *localWebserverEndpoints) setWALArchiveStatusCondition(w http.ResponseW
return
}
- if errCond := conditions.Patch(ctx, ws.typedClient, cluster, asr.getContinuousArchivingCondition()); errCond != nil {
+ if errCond := status.PatchConditionsWithOptimisticLock(
+ ctx,
+ ws.typedClient,
+ cluster,
+ asr.getContinuousArchivingCondition(),
+ ); errCond != nil {
contextLogger.Error(errCond, "Error changing wal archiving condition",
"condition", asr.getContinuousArchivingCondition())
http.Error(
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
index 5d1ad1562b..2e6f58f5b6 100644
--- a/pkg/management/postgres/webserver/plugin_backup.go
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -32,9 +32,9 @@ import (
pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
)
// PluginBackupCommand represent a backup command that is being executed
@@ -102,7 +102,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
// Update backup status in cluster conditions on startup
if err := b.retryWithRefreshedCluster(ctx, func() error {
- return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
+ return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
}); err != nil {
contextLogger.Error(err, "Error changing backup condition (backup started)")
// We do not terminate here because we could still have a good backup
@@ -152,7 +152,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
// Update backup status in cluster conditions on backup completion
if err := b.retryWithRefreshedCluster(ctx, func() error {
- return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
+ return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
}); err != nil {
contextLogger.Error(err, "Can't update the cluster with the completed backup data")
}
@@ -176,12 +176,15 @@ func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure er
// add backup failed condition to the cluster
if failErr := b.retryWithRefreshedCluster(ctx, func() error {
- origCluster := b.Cluster.DeepCopy()
-
- meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(failure))
-
- b.Cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339)
- return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster))
+ return status.PatchWithOptimisticLock(
+ ctx,
+ b.Client,
+ b.Cluster,
+ func(cluster *apiv1.Cluster) {
+ meta.SetStatusCondition(&cluster.Status.Conditions, apiv1.BuildClusterBackupFailedCondition(failure))
+ cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339)
+ },
+ )
}); failErr != nil {
contextLogger.Error(failErr, "while setting cluster condition for failed backup")
}
diff --git a/pkg/reconciler/replicaclusterswitch/reconciler.go b/pkg/reconciler/replicaclusterswitch/reconciler.go
index 15342e9adc..fd185a7e49 100644
--- a/pkg/reconciler/replicaclusterswitch/reconciler.go
+++ b/pkg/reconciler/replicaclusterswitch/reconciler.go
@@ -31,6 +31,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -89,28 +90,33 @@ func startTransition(ctx context.Context, cli client.Client, cluster *apiv1.Clus
return nil, fmt.Errorf("while fencing primary cluster to demote it: %w", err)
}
- origCluster := cluster.DeepCopy()
- meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
- Type: conditionDesignatedPrimaryTransition,
- Status: metav1.ConditionFalse,
- Reason: "ReplicaClusterAfterCreation",
- Message: "Enabled external cluster after a node was generated",
- })
- meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
- Type: conditionFence,
- Status: metav1.ConditionTrue,
- Reason: "ReplicaClusterAfterCreation",
- Message: "Enabled external cluster after a node was generated",
- })
- meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
- Type: ConditionReplicaClusterSwitch,
- Status: metav1.ConditionFalse,
- Reason: "ReplicaEnabledSetTrue",
- Message: "Starting the Replica cluster transition",
- })
-
- cluster.Status.SwitchReplicaClusterStatus.InProgress = true
- if err := cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)); err != nil {
+ if err := status.PatchWithOptimisticLock(
+ ctx,
+ cli,
+ cluster,
+ func(cluster *apiv1.Cluster) {
+ meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+ Type: conditionDesignatedPrimaryTransition,
+ Status: metav1.ConditionFalse,
+ Reason: "ReplicaClusterAfterCreation",
+ Message: "Enabled external cluster after a node was generated",
+ })
+ meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+ Type: conditionFence,
+ Status: metav1.ConditionTrue,
+ Reason: "ReplicaClusterAfterCreation",
+ Message: "Enabled external cluster after a node was generated",
+ })
+ meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+ Type: ConditionReplicaClusterSwitch,
+ Status: metav1.ConditionFalse,
+ Reason: "ReplicaEnabledSetTrue",
+ Message: "Starting the Replica cluster transition",
+ })
+
+ cluster.Status.SwitchReplicaClusterStatus.InProgress = true
+ },
+ ); err != nil {
return nil, err
}
@@ -132,18 +138,23 @@ func cleanupTransitionMetadata(ctx context.Context, cli client.Client, cluster *
return err
}
}
- origCluster := cluster.DeepCopy()
- meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionDesignatedPrimaryTransition)
- meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionFence)
- meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
- Type: ConditionReplicaClusterSwitch,
- Status: metav1.ConditionTrue,
- Reason: "ReplicaEnabledSetTrue",
- Message: "Completed the Replica cluster transition",
- })
- cluster.Status.SwitchReplicaClusterStatus.InProgress = false
-
- return cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster))
+
+ return status.PatchWithOptimisticLock(
+ ctx,
+ cli,
+ cluster,
+ func(cluster *apiv1.Cluster) {
+ meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionDesignatedPrimaryTransition)
+ meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionFence)
+ meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+ Type: ConditionReplicaClusterSwitch,
+ Status: metav1.ConditionTrue,
+ Reason: "ReplicaEnabledSetTrue",
+ Message: "Completed the Replica cluster transition",
+ })
+ cluster.Status.SwitchReplicaClusterStatus.InProgress = false
+ },
+ )
}
func reconcileDemotionToken(
diff --git a/pkg/resources/status/conditions.go b/pkg/resources/status/conditions.go
new file mode 100644
index 0000000000..54b09a056b
--- /dev/null
+++ b/pkg/resources/status/conditions.go
@@ -0,0 +1,81 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+// PatchConditionsWithOptimisticLock updates the given conditions in the
+// cluster status, retrying on conflict. After a successful update, it
+// refreshes the conditions of the passed cluster with the latest ones
+// read from the API server.
+// This function is needed because Kubernetes still doesn't support strategic merge
+// for CRDs (see https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
+func PatchConditionsWithOptimisticLock(
+ ctx context.Context,
+ c client.Client,
+ cluster *apiv1.Cluster,
+ conditions ...metav1.Condition,
+) error {
+ if cluster == nil || len(conditions) == 0 {
+ return nil
+ }
+
+ applyConditions := func(cluster *apiv1.Cluster) bool {
+ changed := false
+		for _, condition := range conditions {
+			// Call SetStatusCondition unconditionally: writing `changed || ...`
+			// would short-circuit and skip the remaining conditions once one
+			// of them reports a change.
+			changed = meta.SetStatusCondition(&cluster.Status.Conditions, condition) || changed
+ }
+ return changed
+ }
+
+ var currentCluster apiv1.Cluster
+ if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &currentCluster); err != nil {
+ return err
+ }
+
+ updatedCluster := currentCluster.DeepCopy()
+ if changed := applyConditions(updatedCluster); !changed {
+ return nil
+ }
+
+ if err := c.Status().Patch(
+ ctx,
+ updatedCluster,
+			client.MergeFromWithOptions(&currentCluster, client.MergeFromWithOptimisticLock{}),
+ ); err != nil {
+ return err
+ }
+
+ cluster.Status.Conditions = updatedCluster.Status.Conditions
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("while updating conditions: %w", err)
+ }
+
+ return nil
+}
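
The new helper wraps the read-modify-write cycle in retry.RetryOnConflict, so callers no longer hand-roll DeepCopy plus MergeFrom patches. A minimal sketch of a call site, assuming ctx, a controller-runtime client c, and a fetched cluster come from the surrounding reconciler (the condition message text is illustrative, not taken from the codebase):

	cond := metav1.Condition{
		Type:    string(apiv1.ConditionContinuousArchiving),
		Status:  metav1.ConditionTrue,
		Reason:  string(apiv1.ConditionReasonContinuousArchivingSuccess),
		Message: "continuous archiving is working", // assumed wording
	}
	if err := status.PatchConditionsWithOptimisticLock(ctx, c, cluster, cond); err != nil {
		return fmt.Errorf("while patching the archiving condition: %w", err)
	}
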
diff --git a/pkg/resources/status/phase.go b/pkg/resources/status/phase.go
index 684eefd26c..bac80933c5 100644
--- a/pkg/resources/status/phase.go
+++ b/pkg/resources/status/phase.go
@@ -18,7 +18,7 @@ package status
import (
"context"
- "reflect"
+ "fmt"
"github.com/cloudnative-pg/machinery/pkg/log"
"k8s.io/apimachinery/pkg/api/meta"
@@ -51,47 +51,50 @@ func RegisterPhaseWithOrigCluster(
phase string,
reason string,
) error {
- contextLogger := log.FromContext(ctx)
-
- // we ensure that the modifiedCluster conditions aren't nil before operating
- if modifiedCluster.Status.Conditions == nil {
- modifiedCluster.Status.Conditions = []metav1.Condition{}
+ if err := PatchWithOptimisticLock(
+ ctx,
+ cli,
+ modifiedCluster,
+ func(cluster *apiv1.Cluster) {
+ if cluster.Status.Conditions == nil {
+ cluster.Status.Conditions = []metav1.Condition{}
+ }
+
+ cluster.Status.Phase = phase
+ cluster.Status.PhaseReason = reason
+
+ condition := metav1.Condition{
+ Type: string(apiv1.ConditionClusterReady),
+ Status: metav1.ConditionFalse,
+ Reason: string(apiv1.ClusterIsNotReady),
+ Message: "Cluster Is Not Ready",
+ }
+
+ if cluster.Status.Phase == apiv1.PhaseHealthy {
+ condition = metav1.Condition{
+ Type: string(apiv1.ConditionClusterReady),
+ Status: metav1.ConditionTrue,
+ Reason: string(apiv1.ClusterReady),
+ Message: "Cluster is Ready",
+ }
+ }
+
+ meta.SetStatusCondition(&cluster.Status.Conditions, condition)
+ },
+ ); err != nil {
+ return fmt.Errorf("while updating phase: %w", err)
}
- modifiedCluster.Status.Phase = phase
- modifiedCluster.Status.PhaseReason = reason
+ contextLogger := log.FromContext(ctx)
- condition := metav1.Condition{
- Type: string(apiv1.ConditionClusterReady),
- Status: metav1.ConditionFalse,
- Reason: string(apiv1.ClusterIsNotReady),
- Message: "Cluster Is Not Ready",
- }
+ modifiedPhase := modifiedCluster.Status.Phase
+ origPhase := origCluster.Status.Phase
- if modifiedCluster.Status.Phase == apiv1.PhaseHealthy {
- condition = metav1.Condition{
- Type: string(apiv1.ConditionClusterReady),
- Status: metav1.ConditionTrue,
- Reason: string(apiv1.ClusterReady),
- Message: "Cluster is Ready",
- }
+ if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy {
+ contextLogger.Info("Cluster is not healthy")
}
-
- meta.SetStatusCondition(&modifiedCluster.Status.Conditions, condition)
-
- if !reflect.DeepEqual(origCluster, modifiedCluster) {
- modifiedPhase := modifiedCluster.Status.Phase
- origPhase := origCluster.Status.Phase
-
- if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy {
- contextLogger.Info("Cluster is not healthy")
- }
- if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy {
- contextLogger.Info("Cluster is healthy")
- }
- if err := cli.Status().Patch(ctx, modifiedCluster, client.MergeFrom(origCluster)); err != nil {
- return err
- }
+ if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy {
+ contextLogger.Info("Cluster is healthy")
}
return nil
diff --git a/pkg/resources/status/update.go b/pkg/resources/status/update.go
new file mode 100644
index 0000000000..0543292d9e
--- /dev/null
+++ b/pkg/resources/status/update.go
@@ -0,0 +1,73 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+// PatchWithOptimisticLock updates the status of the cluster using the passed
+// transaction function.
+// Important: after successfully updating the status, this function writes
+// the refreshed status back into the passed cluster.
+func PatchWithOptimisticLock(
+ ctx context.Context,
+ c client.Client,
+ cluster *apiv1.Cluster,
+ tx func(cluster *apiv1.Cluster),
+) error {
+ if cluster == nil {
+ return nil
+ }
+
+ if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ var currentCluster apiv1.Cluster
+		if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &currentCluster); err != nil {
+ return err
+ }
+
+ updatedCluster := currentCluster.DeepCopy()
+ tx(updatedCluster)
+
+ if equality.Semantic.DeepEqual(currentCluster.Status, updatedCluster.Status) {
+ return nil
+ }
+
+ if err := c.Status().Patch(
+ ctx,
+ updatedCluster,
+			client.MergeFromWithOptions(&currentCluster, client.MergeFromWithOptimisticLock{}),
+ ); err != nil {
+ return err
+ }
+
+ cluster.Status = updatedCluster.Status
+
+ return nil
+ }); err != nil {
+		return fmt.Errorf("while updating the cluster status: %w", err)
+ }
+
+ return nil
+}
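
PatchWithOptimisticLock generalizes the same pattern to arbitrary status mutations: the caller passes a closure, and the helper re-reads the cluster, applies the closure to a fresh copy, skips the patch when nothing changed, and retries on conflict. A sketch under the same assumptions as above (ctx, client c, and cluster come from the caller; the phase reason string is illustrative):

	err := status.PatchWithOptimisticLock(
		ctx,
		c,
		cluster,
		func(cluster *apiv1.Cluster) {
			cluster.Status.Phase = apiv1.PhaseHealthy
			cluster.Status.PhaseReason = "all instances are ready" // assumed wording
		},
	)
	if err != nil {
		return fmt.Errorf("while updating the cluster phase: %w", err)
	}
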
From 55bc13852672cd3543bb0d26cd0cef34004ec588 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 18 Dec 2024 22:32:10 +0100
Subject: [PATCH 241/836] chore(deps): update dependency kubernetes-sigs/kind
to v0.26.0 (main) (#6350)
---
.github/workflows/continuous-delivery.yml | 2 +-
.github/workflows/continuous-integration.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 9d809c8802..d29cc443d2 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -36,7 +36,7 @@ on:
env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
- KIND_VERSION: "v0.25.0"
+ KIND_VERSION: "v0.26.0"
ROOK_VERSION: "v1.16.0"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index db7bbb243e..fe8629a5e5 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -19,7 +19,7 @@ env:
GOLANG_VERSION: "1.23.x"
GOLANGCI_LINT_VERSION: "v1.62.2"
KUBEBUILDER_VERSION: "2.3.1"
- KIND_VERSION: "v0.25.0"
+ KIND_VERSION: "v0.26.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
API_DOC_NAME: "cloudnative-pg.v1.md"
SLACK_USERNAME: "cnpg-bot"
From 141c5846a0b3b94fb0f3df3237cb34f5b990c7df Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 19 Dec 2024 09:31:42 +0100
Subject: [PATCH 242/836] fix(deps): update github.com/cloudnative-pg/cnpg-i
digest to cbc4287 (main) (#6332)
---
go.mod | 6 +++---
go.sum | 12 ++++++------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/go.mod b/go.mod
index bf663e9fe4..20e25fbcee 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
- github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0
+ github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
@@ -38,7 +38,7 @@ require (
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/term v0.27.0
- google.golang.org/grpc v1.69.0
+ google.golang.org/grpc v1.69.2
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.3
k8s.io/apiextensions-apiserver v0.31.3
@@ -113,7 +113,7 @@ require (
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
- google.golang.org/protobuf v1.35.1 // indirect
+ google.golang.org/protobuf v1.36.0 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 7a990938f9..53e874f1bf 100644
--- a/go.sum
+++ b/go.sum
@@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr
github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU=
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee h1:PJc4BpPu0b684BrwWzy0B5W/CSqrnUV+jv3PTrSUx8g=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee/go.mod h1:ahVFn+JzYkFfv7Iwpswu4lsuC9yK7zZupM1ssaIKPFI=
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs=
github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -270,10 +270,10 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
-google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
-google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
-google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
+google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
From ef1669b1addd9a5e9333095c0ff7ac19632f529d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niccol=C3=B2=20Fei?=
Date: Thu, 19 Dec 2024 10:25:38 +0100
Subject: [PATCH 243/836] test(e2e): add coverage for Publication and
Subscription features (#6320)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* Implement end-to-end tests to verify the publication and subscription
  functionality
* Ensure data replication scenarios are thoroughly tested
Closes #6306
Signed-off-by: Niccolò Fei
Signed-off-by: Gabriele Quaresima
Signed-off-by: Marco Nenciarini
Signed-off-by: Armando Ruocco
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: Gabriele Quaresima
Co-authored-by: Marco Nenciarini
Co-authored-by: Armando Ruocco
Co-authored-by: Jonathan Gonzalez V.
---
.github/workflows/continuous-delivery.yml | 8 +-
tests/e2e/asserts_test.go | 97 +++----
tests/e2e/cluster_microservice_test.go | 9 +-
.../declarative_database_management_test.go | 14 +-
.../destination-database.yaml | 1 +
.../declarative_pub_sub/source-database.yaml | 1 +
tests/e2e/managed_roles_test.go | 80 +++---
tests/e2e/publication_subscription_test.go | 252 +++++++++++++-----
tests/labels.go | 6 +-
9 files changed, 289 insertions(+), 179 deletions(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index d29cc443d2..7ce21a1d15 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -20,10 +20,10 @@ on:
default: '4'
feature_type:
description: >
- Feature Type (disruptive, performance, upgrade, smoke, basic, service-connectivity, self-healing,
- backup-restore, snapshot, operator, observability, replication, plugin, postgres-configuration,
- pod-scheduling, cluster-metadata, recovery, importing-databases, storage, security, maintenance,
- tablespaces)
+ Feature Type (backup-restore, basic, cluster-metadata, declarative-databases, disruptive,
+ importing-databases, maintenance, no-openshift, observability, operator, performance, plugin,
+ pod-scheduling, postgres-configuration, publication-subscription, recovery, replication,
+ security, self-healing, service-connectivity, smoke, snapshot, storage, tablespaces, upgrade)
required: false
log_level:
description: 'Log level for operator (error, warning, info, debug(default), trace)'
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 8c6d459fec..ccf73d21a9 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -32,7 +32,7 @@ import (
"github.com/thoas/go-funk"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/utils/strings/slices"
@@ -504,54 +504,34 @@ func insertRecordIntoTable(tableName string, value int, conn *sql.DB) {
Expect(err).ToNot(HaveOccurred())
}
-// AssertDatabaseExists assert if database exists
-func AssertDatabaseExists(pod *corev1.Pod, databaseName string, expectedValue bool) {
- By(fmt.Sprintf("verifying if database %v exists", databaseName), func() {
- query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE lower(datname) = lower('%v'));", databaseName)
+// QueryMatchExpectationPredicate returns a Gomega assertion function that
+// runs the given query in the instance pod and matches its trimmed output
+// against expectedOutput.
+func QueryMatchExpectationPredicate(
+ pod *corev1.Pod,
+ dbname testsUtils.DatabaseName,
+ query string,
+ expectedOutput string,
+) func(g Gomega) {
+ return func(g Gomega) {
+		// run the query in the target instance pod and capture its output
stdout, stderr, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
- Namespace: pod.Namespace,
- PodName: pod.Name,
- },
- testsUtils.PostgresDBName,
- query)
+ testsUtils.PodLocator{Namespace: pod.Namespace, PodName: pod.Name},
+ dbname,
+ query,
+ )
if err != nil {
GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr)
}
- Expect(err).ToNot(HaveOccurred())
-
- if expectedValue {
- Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t"))
- } else {
- Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f"))
- }
- })
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo(expectedOutput),
+ fmt.Sprintf("expected query %q to return %q", query, expectedOutput))
+ }
}
-// AssertUserExists assert if user exists
-func AssertUserExists(pod *corev1.Pod, userName string, expectedValue bool) {
- By(fmt.Sprintf("verifying if user %v exists", userName), func() {
- query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE lower(rolname) = lower('%v'));", userName)
- Eventually(func(g Gomega) {
- stdout, stderr, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
- Namespace: pod.Namespace,
- PodName: pod.Name,
- },
- testsUtils.PostgresDBName,
- query)
- if err != nil {
- GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr)
- }
- g.Expect(err).ToNot(HaveOccurred())
+func roleExistsQuery(roleName string) string {
+ return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE rolname='%v')", roleName)
+}
- if expectedValue {
- g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t"))
- } else {
- g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f"))
- }
- }, 60).Should(Succeed())
- })
+func databaseExistsQuery(dbName string) string {
+ return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname='%v')", dbName)
}
// AssertDataExpectedCount verifies that an expected amount of rows exists on the table
@@ -832,7 +812,7 @@ func AssertScheduledBackupsAreScheduled(namespace string, backupYAMLPath string,
Name: scheduledBackupName,
}
- Eventually(func() (*v1.Time, error) {
+ Eventually(func() (*metav1.Time, error) {
scheduledBackup := &apiv1.ScheduledBackup{}
err := env.Client.Get(env.Ctx,
scheduledBackupNamespacedName, scheduledBackup)
@@ -891,11 +871,6 @@ func getScheduledBackupCompleteBackupsCount(namespace string, scheduledBackupNam
// AssertPgRecoveryMode verifies if the target pod recovery mode is enabled or disabled
func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) {
By(fmt.Sprintf("verifying that postgres recovery mode is %v", expectedValue), func() {
- stringExpectedValue := "f"
- if expectedValue {
- stringExpectedValue = "t"
- }
-
Eventually(func() (string, error) {
stdOut, stdErr, err := env.ExecQueryInInstancePod(
testsUtils.PodLocator{
@@ -908,10 +883,18 @@ func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) {
GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", stdOut, stdErr)
}
return strings.Trim(stdOut, "\n"), err
- }, 300, 10).Should(BeEquivalentTo(stringExpectedValue))
+ }, 300, 10).Should(BeEquivalentTo(boolPGOutput(expectedValue)))
})
}
+func boolPGOutput(expectedValue bool) string {
+ stringExpectedValue := "f"
+ if expectedValue {
+ stringExpectedValue = "t"
+ }
+ return stringExpectedValue
+}
+
// AssertReplicaModeCluster checks that, after inserting some data in a source cluster,
// a replica cluster can be bootstrapped using pg_basebackup and is properly replicating
// from the source cluster
@@ -991,8 +974,10 @@ func AssertReplicaModeCluster(
// verify the replica database created followed the source database, rather than
// default to the "app" db and user
By("checking that in replica cluster there is no database app and user app", func() {
- AssertDatabaseExists(primaryReplicaCluster, "app", false)
- AssertUserExists(primaryReplicaCluster, "app", false)
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ databaseExistsQuery("app"), "f"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ roleExistsQuery("app"), "f"), 30).Should(Succeed())
})
}
}
@@ -1072,8 +1057,10 @@ func AssertDetachReplicaModeCluster(
By("verifying the replica database doesn't exist in the replica cluster", func() {
// Application database configuration is skipped for replica clusters,
// so we expect these to not be present
- AssertDatabaseExists(primaryReplicaCluster, replicaDatabaseName, false)
- AssertUserExists(primaryReplicaCluster, replicaUserName, false)
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ databaseExistsQuery(replicaDatabaseName), "f"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ roleExistsQuery(replicaUserName), "f"), 30).Should(Succeed())
})
By("writing some new data to the source cluster", func() {
@@ -1684,7 +1671,7 @@ func AssertScheduledBackupsImmediate(namespace, backupYAMLPath, scheduledBackupN
Namespace: namespace,
Name: scheduledBackupName,
}
- Eventually(func() (*v1.Time, error) {
+ Eventually(func() (*metav1.Time, error) {
scheduledBackup := &apiv1.ScheduledBackup{}
err = env.Client.Get(env.Ctx,
scheduledBackupNamespacedName, scheduledBackup)
@@ -2605,7 +2592,7 @@ func AssertBackupConditionTimestampChangedInClusterStatus(
namespace,
clusterName string,
clusterConditionType apiv1.ClusterConditionType,
- lastTransactionTimeStamp *v1.Time,
+ lastTransactionTimeStamp *metav1.Time,
) {
By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() {
Eventually(func() (bool, error) {
diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go
index 6019086aa3..d957e1976f 100644
--- a/tests/e2e/cluster_microservice_test.go
+++ b/tests/e2e/cluster_microservice_test.go
@@ -261,7 +261,8 @@ func assertTableAndDataOnImportedCluster(
})
By("verifying the user named 'micro' on source is not in imported database", func() {
- AssertUserExists(pod, "micro", false)
+ Eventually(QueryMatchExpectationPredicate(pod, testsUtils.PostgresDBName,
+ roleExistsQuery("micro"), "f"), 30).Should(Succeed())
})
})
}
@@ -330,8 +331,10 @@ func assertImportRenamesSelectedDatabase(
importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName)
Expect(err).ToNot(HaveOccurred())
- AssertUserExists(importedPrimaryPod, "db2", false)
- AssertUserExists(importedPrimaryPod, "app", true)
+ Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery("db2"), "f"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery("app"), "t"), 30).Should(Succeed())
})
By("cleaning up the clusters", func() {
diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go
index a7f9c574eb..594bba356c 100644
--- a/tests/e2e/declarative_database_management_test.go
+++ b/tests/e2e/declarative_database_management_test.go
@@ -24,7 +24,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -75,7 +75,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate)
Eventually(func(g Gomega) {
stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ testsUtils.PodLocator{
Namespace: namespace,
PodName: primaryPod,
},
@@ -119,20 +119,22 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertDatabaseExists(primaryPodInfo, dbname, true)
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ databaseExistsQuery(dbname), "t"), 30).Should(Succeed())
assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database)
})
By("removing the Database object", func() {
- Expect(utils.DeleteObject(env, &database)).To(Succeed())
+ Expect(testsUtils.DeleteObject(env, &database)).To(Succeed())
})
By("verifying the retention policy in the postgres database", func() {
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertDatabaseExists(primaryPodInfo, dbname, retainOnDeletion)
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ databaseExistsQuery(dbname), boolPGOutput(retainOnDeletion)), 30).Should(Succeed())
})
}
@@ -193,7 +195,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
}, 300).WithPolling(10 * time.Second).Should(Succeed())
})
By("deleting the namespace and making sure it succeeds before timeout", func() {
- err := env.DeleteNamespaceAndWait(namespace, 60)
+ err := env.DeleteNamespaceAndWait(namespace, 120)
Expect(err).ToNot(HaveOccurred())
})
})
diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml
index 2a6e122647..d4deace971 100644
--- a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml
+++ b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml
@@ -5,5 +5,6 @@ metadata:
spec:
name: declarative
owner: app
+ databaseReclaimPolicy: delete
cluster:
name: destination-cluster
diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml
index 80d5a4cf27..4ebcae63ee 100644
--- a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml
+++ b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml
@@ -5,5 +5,6 @@ metadata:
spec:
name: declarative
owner: app
+ databaseReclaimPolicy: delete
cluster:
name: source-cluster
diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go
index fc5dd5f314..64fa7bd753 100644
--- a/tests/e2e/managed_roles_test.go
+++ b/tests/e2e/managed_roles_test.go
@@ -24,13 +24,13 @@ import (
"github.com/lib/pq"
corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -92,11 +92,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
) mem ON member = oid
WHERE rolname =` + pq.QuoteLiteral(roleName)
stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ testsUtils.PodLocator{
Namespace: namespace,
PodName: primaryPod,
},
- utils.PostgresDBName,
+ testsUtils.PostgresDBName,
query)
if err != nil {
return []string{ERROR}
@@ -112,11 +112,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
Eventually(func() string {
stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ testsUtils.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
},
- utils.PostgresDBName,
+ testsUtils.PostgresDBName,
query)
if err != nil {
return ""
@@ -139,10 +139,14 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertUserExists(primaryPod, username, true)
- AssertUserExists(primaryPod, userWithPerpetualPass, true)
- AssertUserExists(primaryPod, userWithHashedPassword, true)
- AssertUserExists(primaryPod, unrealizableUser, false)
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(username), "t"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(userWithPerpetualPass), "t"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(userWithHashedPassword), "t"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(unrealizableUser), "f"), 30).Should(Succeed())
query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+
"and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+
@@ -152,11 +156,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
for _, q := range []string{query, query2} {
stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ testsUtils.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- utils.PostgresDBName,
+ testsUtils.PostgresDBName,
q)
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(Equal("t\n"))
@@ -164,17 +168,20 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verifying connectivity of new managed role", func() {
- rwService := utils.GetReadWriteServiceName(clusterName)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env)
- AssertConnection(namespace, rwService, utils.PostgresDBName, userWithHashedPassword, userWithHashedPassword, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName,
+ username, password, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName,
+ userWithHashedPassword, userWithHashedPassword, env)
})
By("ensuring the app role has been granted createdb in the managed stanza", func() {
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertUserExists(primaryPodInfo, appUsername, true)
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ roleExistsQuery(appUsername), "t"), 30).Should(Succeed())
query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+
"FROM pg_roles WHERE rolname='%s'", appUsername)
@@ -186,7 +193,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).NotTo(HaveOccurred())
appUserSecret := corev1.Secret{}
- err = utils.GetObject(
+ err = testsUtils.GetObject(
env,
types.NamespacedName{Name: cluster.GetApplicationSecretName(), Namespace: namespace},
&appUserSecret,
@@ -194,9 +201,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).NotTo(HaveOccurred())
pass := string(appUserSecret.Data["password"])
- rwService := utils.GetReadWriteServiceName(clusterName)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, utils.PostgresDBName, appUsername, pass, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName, appUsername, pass, env)
})
By("Verify show unrealizable role configurations in the status", func() {
@@ -220,7 +227,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
expectedCreateDB := false
expectedCreateRole := true
expectedConnLmt := int64(10)
- rwService := utils.GetReadWriteServiceName(clusterName)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
By("updating role attribute in spec", func() {
cluster, err := env.GetCluster(namespace, clusterName)
@@ -243,8 +250,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("the connection should fail since we disabled the login", func() {
- forwardConn, conn, err := utils.ForwardPSQLServiceConnection(env, namespace, rwService,
- utils.PostgresDBName, username, password)
+ forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, rwService,
+ testsUtils.PostgresDBName, username, password)
defer func() {
_ = conn.Close()
forwardConn.Close()
@@ -274,9 +281,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("the connectivity should be success again", func() {
- rwService := utils.GetReadWriteServiceName(clusterName)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, password, env)
})
})
@@ -370,7 +377,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
- AssertUserExists(primaryPod, unrealizableUser, true)
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed())
})
By("Add role in InRole for role new_role and verify in database", func() {
@@ -431,7 +439,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
// user not changed
- AssertUserExists(primaryPod, unrealizableUser, true)
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed())
Eventually(func() int {
cluster, err := env.GetCluster(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
@@ -464,9 +473,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verify connectivity using changed password in secret", func() {
- rwService := utils.GetReadWriteServiceName(clusterName)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env)
})
By("Update password in database", func() {
@@ -474,18 +483,18 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
username, pq.QuoteLiteral(newPassword))
_, _, err = env.ExecQueryInInstancePod(
- utils.PodLocator{
+ testsUtils.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
},
- utils.PostgresDBName,
+ testsUtils.PostgresDBName,
query)
Expect(err).ToNot(HaveOccurred())
})
By("Verify password in secrets is still valid", func() {
- rwService := utils.GetReadWriteServiceName(clusterName)
- AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env)
+ rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env)
})
})
@@ -497,12 +506,12 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
updated := cluster.DeepCopy()
for i, r := range updated.Spec.Managed.Roles {
if r.Name == newUserName {
- updated.Spec.Managed.Roles[i].ValidUntil = &v1.Time{}
+ updated.Spec.Managed.Roles[i].ValidUntil = &metav1.Time{}
}
if r.Name == username {
tt, err := time.Parse(time.RFC3339Nano, newValidUntilString)
Expect(err).ToNot(HaveOccurred())
- nt := v1.NewTime(tt)
+ nt := metav1.NewTime(tt)
updated.Spec.Managed.Roles[i].ValidUntil = &nt
}
}
@@ -544,7 +553,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
By("Verify new_role not existed in db", func() {
primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertUserExists(primaryPod, newUserName, false)
+ Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ roleExistsQuery(newUserName), "f"), 30).Should(Succeed())
})
})
})
diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go
index 3133bd3ef7..0ffa10d918 100644
--- a/tests/e2e/publication_subscription_test.go
+++ b/tests/e2e/publication_subscription_test.go
@@ -24,7 +24,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -33,7 +33,7 @@ import (
// - spinning up a cluster, apply a declarative publication/subscription on it
// Set of tests in which we use the declarative publication and subscription CRDs on an existing cluster
-var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePubSub), func() {
+var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSubscription), func() {
const (
sourceClusterManifest = fixturesDir + "/declarative_pub_sub/source-cluster.yaml.template"
destinationClusterManifest = fixturesDir + "/declarative_pub_sub/destination-cluster.yaml.template"
@@ -54,13 +54,12 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
const (
namespacePrefix = "declarative-pub-sub"
dbname = "declarative"
+ subName = "sub"
+ pubName = "pub"
tableName = "test"
)
var (
sourceClusterName, destinationClusterName, namespace string
- databaseObjectName, pubObjectName, subObjectName string
- pub *apiv1.Publication
- sub *apiv1.Subscription
err error
)
@@ -84,8 +83,51 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
})
})
- assertCreateDatabase := func(namespace, clusterName, databaseManifest, databaseName string) {
- databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest)
+ AfterEach(func() {
+ // We want to reuse the same source and destination Cluster, so
+ // we need to drop each Postgres object that has been created.
+ // We need to make sure that publication/subscription have been removed before
+ // attempting to drop the database, otherwise the DROP DATABASE will fail because
+ // there's an active logical replication slot.
+ destPrimaryPod, err := env.GetClusterPrimary(namespace, destinationClusterName)
+ Expect(err).ToNot(HaveOccurred())
+ _, _, err = env.EventuallyExecQueryInInstancePod(
+ testsUtils.PodLocator{
+ Namespace: destPrimaryPod.Namespace,
+ PodName: destPrimaryPod.Name,
+ },
+ dbname,
+ fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", subName),
+ RetryTimeout,
+ PollingTime,
+ )
+ Expect(err).ToNot(HaveOccurred())
+
+ sourcePrimaryPod, err := env.GetClusterPrimary(namespace, sourceClusterName)
+ Expect(err).ToNot(HaveOccurred())
+ _, _, err = env.EventuallyExecQueryInInstancePod(
+ testsUtils.PodLocator{
+ Namespace: sourcePrimaryPod.Namespace,
+ PodName: sourcePrimaryPod.Name,
+ },
+ dbname,
+ fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pubName),
+ RetryTimeout,
+ PollingTime,
+ )
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(DeleteResourcesFromFile(namespace, destinationDatabaseManifest)).To(Succeed())
+ Expect(DeleteResourcesFromFile(namespace, sourceDatabaseManifest)).To(Succeed())
+ Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, testsUtils.PostgresDBName,
+ databaseExistsQuery(dbname), "f"), 30).Should(Succeed())
+ Eventually(QueryMatchExpectationPredicate(destPrimaryPod, testsUtils.PostgresDBName,
+ databaseExistsQuery(dbname), "f"), 30).Should(Succeed())
+ })
+
+ assertCreateDatabase := func(namespace, clusterName, databaseManifest string) {
+ databaseObject := &apiv1.Database{}
+ databaseObjectName, err := env.GetResourceNameFromYAML(databaseManifest)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() {
@@ -93,7 +135,6 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
})
By(fmt.Sprintf("ensuring the %s Database CRD succeeded reconciliation", databaseObjectName), func() {
- databaseObject := &apiv1.Database{}
databaseNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: databaseObjectName,
@@ -106,81 +147,33 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
}, 300).WithPolling(10 * time.Second).Should(Succeed())
})
- By(fmt.Sprintf("verifying the %s database has been created", databaseName), func() {
+ By(fmt.Sprintf("verifying the %s database has been created", databaseObject.Spec.Name), func() {
primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertDatabaseExists(primaryPodInfo, databaseName, true)
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ databaseExistsQuery(databaseObject.Spec.Name), "t"), 30).Should(Succeed())
})
}
- assertPublicationExists := func(namespace, primaryPod string, pub *apiv1.Publication) {
- query := fmt.Sprintf("select count(*) from pg_publication where pubname = '%s'",
- pub.Spec.Name)
- Eventually(func(g Gomega) {
- stdout, _, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
- Namespace: namespace,
- PodName: primaryPod,
- },
- dbname,
- query)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(stdout).Should(ContainSubstring("1"), "expected publication not found")
- }, 30).Should(Succeed())
- }
-
- assertSubscriptionExists := func(namespace, primaryPod string, sub *apiv1.Subscription) {
- query := fmt.Sprintf("select count(*) from pg_subscription where subname = '%s'",
- sub.Spec.Name)
- Eventually(func(g Gomega) {
- stdout, _, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
- Namespace: namespace,
- PodName: primaryPod,
- },
- dbname,
- query)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(stdout).Should(ContainSubstring("1"), "expected subscription not found")
- }, 30).Should(Succeed())
- }
-
- It("can perform logical replication", func() {
- assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest, dbname)
-
- tableLocator := TableLocator{
- Namespace: namespace,
- ClusterName: sourceClusterName,
- DatabaseName: dbname,
- TableName: tableName,
- }
- AssertCreateTestData(env, tableLocator)
-
- assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest, dbname)
-
- By("creating an empty table inside the destination database", func() {
- query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName)
- _, err = testUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname,
- apiv1.ApplicationUserSecretSuffix, query)
- Expect(err).ToNot(HaveOccurred())
- })
+ // nolint:dupl
+ assertCreatePublication := func(namespace, clusterName, publicationManifest string) {
+ pubObjectName, err := env.GetResourceNameFromYAML(publicationManifest)
+ Expect(err).NotTo(HaveOccurred())
By("applying Publication CRD manifest", func() {
- CreateResourceFromFile(namespace, pubManifest)
- pubObjectName, err = env.GetResourceNameFromYAML(pubManifest)
- Expect(err).NotTo(HaveOccurred())
+ CreateResourceFromFile(namespace, publicationManifest)
})
By("ensuring the Publication CRD succeeded reconciliation", func() {
// get publication object
- pub = &apiv1.Publication{}
pubNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: pubObjectName,
}
Eventually(func(g Gomega) {
+ pub := &apiv1.Publication{}
err := env.Client.Get(env.Ctx, pubNamespacedName, pub)
Expect(err).ToNot(HaveOccurred())
g.Expect(pub.Status.Applied).Should(HaveValue(BeTrue()))
@@ -188,27 +181,32 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
})
By("verifying new publication has been created", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName)
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- assertPublicationExists(namespace, primaryPodInfo.Name, pub)
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
+ publicationExistsQuery(pubName), "t"), 30).Should(Succeed())
})
+ }
+
+ // nolint:dupl
+ assertCreateSubscription := func(namespace, clusterName, subscriptionManifest string) {
+ subObjectName, err := env.GetResourceNameFromYAML(subscriptionManifest)
+ Expect(err).NotTo(HaveOccurred())
By("applying Subscription CRD manifest", func() {
- CreateResourceFromFile(namespace, subManifest)
- subObjectName, err = env.GetResourceNameFromYAML(subManifest)
- Expect(err).NotTo(HaveOccurred())
+ CreateResourceFromFile(namespace, subscriptionManifest)
})
By("ensuring the Subscription CRD succeeded reconciliation", func() {
// get subscription object
- sub = &apiv1.Subscription{}
pubNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: subObjectName,
}
Eventually(func(g Gomega) {
+ sub := &apiv1.Subscription{}
err := env.Client.Get(env.Ctx, pubNamespacedName, sub)
Expect(err).ToNot(HaveOccurred())
g.Expect(sub.Status.Applied).Should(HaveValue(BeTrue()))
@@ -216,10 +214,77 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
})
By("verifying new subscription has been created", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName)
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- assertSubscriptionExists(namespace, primaryPodInfo.Name, sub)
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
+ subscriptionExistsQuery(subName), "t"), 30).Should(Succeed())
+ })
+ }
+
+ assertTestPubSub := func(retainOnDeletion bool) {
+ assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest)
+
+ tableLocator := TableLocator{
+ Namespace: namespace,
+ ClusterName: sourceClusterName,
+ DatabaseName: dbname,
+ TableName: tableName,
+ }
+ AssertCreateTestData(env, tableLocator)
+
+ assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest)
+
+ By("creating an empty table inside the destination database", func() {
+ query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName)
+ _, err = testsUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname,
+ apiv1.ApplicationUserSecretSuffix, query)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ assertCreatePublication(namespace, sourceClusterName, pubManifest)
+ assertCreateSubscription(namespace, destinationClusterName, subManifest)
+
+ var (
+ publication *apiv1.Publication
+ subscription *apiv1.Subscription
+ )
+ By("setting the reclaimPolicy", func() {
+ publicationReclaimPolicy := apiv1.PublicationReclaimDelete
+ subscriptionReclaimPolicy := apiv1.SubscriptionReclaimDelete
+ if retainOnDeletion {
+ publicationReclaimPolicy = apiv1.PublicationReclaimRetain
+ subscriptionReclaimPolicy = apiv1.SubscriptionReclaimRetain
+ }
+ // Get the object names
+ pubObjectName, err := env.GetResourceNameFromYAML(pubManifest)
+ Expect(err).NotTo(HaveOccurred())
+ subObjectName, err := env.GetResourceNameFromYAML(subManifest)
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(func(g Gomega) {
+				publication = &apiv1.Publication{}
+				err = testsUtils.GetObject(
+					env,
+					types.NamespacedName{Namespace: namespace, Name: pubObjectName},
+					publication,
+				)
+				g.Expect(err).ToNot(HaveOccurred())
+				publication.Spec.ReclaimPolicy = publicationReclaimPolicy
+				err = env.Client.Update(env.Ctx, publication)
+ g.Expect(err).ToNot(HaveOccurred())
+
+				subscription = &apiv1.Subscription{}
+				err = testsUtils.GetObject(
+					env,
+					types.NamespacedName{Namespace: namespace, Name: subObjectName},
+					subscription,
+				)
+				g.Expect(err).ToNot(HaveOccurred())
+				subscription.Spec.ReclaimPolicy = subscriptionReclaimPolicy
+				err = env.Client.Update(env.Ctx, subscription)
+ g.Expect(err).ToNot(HaveOccurred())
+ }, 60, 5).Should(Succeed())
})
By("checking that the data is present inside the destination cluster database", func() {
@@ -231,6 +296,47 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub
}
AssertDataExpectedCount(env, tableLocator, 2)
})
+
+ By("removing the objects", func() {
+ Expect(testsUtils.DeleteObject(env, publication)).To(Succeed())
+ Expect(testsUtils.DeleteObject(env, subscription)).To(Succeed())
+ })
+
+ By("verifying the publication reclaim policy outcome", func() {
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
+ publicationExistsQuery(pubName), boolPGOutput(retainOnDeletion)), 30).Should(Succeed())
+ })
+
+ By("verifying the subscription reclaim policy outcome", func() {
+ primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
+ subscriptionExistsQuery(subName), boolPGOutput(retainOnDeletion)), 30).Should(Succeed())
+ })
+ }
+
+ When("Reclaim policy is set to delete", func() {
+ It("can manage Publication and Subscription and delete them in Postgres", func() {
+ assertTestPubSub(false)
+ })
+ })
+
+ When("Reclaim policy is set to retain", func() {
+ It("can manage Publication and Subscription and release it", func() {
+ assertTestPubSub(true)
+ })
})
})
})
+
+func publicationExistsQuery(pubName string) string {
+ return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname='%s')", pubName)
+}
+
+func subscriptionExistsQuery(subName string) string {
+ return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_subscription WHERE subname='%s')", subName)
+}
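
These query builders pair with QueryMatchExpectationPredicate from asserts_test.go, turning existence checks into retried Gomega assertions. A sketch of the combined pattern, with primaryPod, dbname, and pubName assumed from the surrounding test:

	Eventually(QueryMatchExpectationPredicate(primaryPod, dbname,
		publicationExistsQuery(pubName), "t"), 30).Should(Succeed())
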
diff --git a/tests/labels.go b/tests/labels.go
index 98649f2be2..ee37925343 100644
--- a/tests/labels.go
+++ b/tests/labels.go
@@ -32,9 +32,6 @@ const (
// LabelDeclarativeDatabases is a label for selecting the declarative databases test
LabelDeclarativeDatabases = "declarative-databases"
- // LabelDeclarativePubSub is a label for selecting the publication / subscription test
- LabelDeclarativePubSub = "publication-subscription"
-
// LabelDisruptive is the string for labelling disruptive tests
LabelDisruptive = "disruptive"
@@ -65,6 +62,9 @@ const (
// LabelPostgresConfiguration is a label for selecting postgres-configuration test
LabelPostgresConfiguration = "postgres-configuration"
+ // LabelPublicationSubscription is a label for selecting the publication / subscription test
+ LabelPublicationSubscription = "publication-subscription"
+
// LabelRecovery is a label for selecting recovery tests
LabelRecovery = "recovery"
From 7eabd38fb70adfaa4c6feeb7b3aa996da8aeef4b Mon Sep 17 00:00:00 2001
From: Peggie
Date: Thu, 19 Dec 2024 10:38:59 +0100
Subject: [PATCH 244/836] feat: Public Cloud K8S versions update (#6316)
Update the versions used to test the operator on public cloud providers
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: public-cloud-k8s-versions-check
---
.github/kind_versions.json | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/.github/kind_versions.json b/.github/kind_versions.json
index 85547c7125..b39d642e5d 100644
--- a/.github/kind_versions.json
+++ b/.github/kind_versions.json
@@ -1,7 +1,8 @@
[
- "v1.31.2",
- "v1.30.6",
- "v1.29.10",
+ "v1.32.0",
+ "v1.31.4",
+ "v1.30.8",
+ "v1.29.12",
"v1.28.15",
"v1.27.16"
]
From 2cce9742abbbf8133f24435d6e5436b94185b1fa Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 19 Dec 2024 11:05:49 +0100
Subject: [PATCH 245/836] chore(deps): update kindest/node docker tag to
v1.32.0 (main) (#6365)
---
hack/e2e/run-e2e-kind.sh | 2 +-
hack/setup-cluster.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index e795c4a4dc..c28579750c 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e"
export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false}
export BUILD_IMAGE=${BUILD_IMAGE:-false}
-KIND_NODE_DEFAULT_VERSION=v1.31.2
+KIND_NODE_DEFAULT_VERSION=v1.32.0
export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION}
export CLUSTER_ENGINE=kind
export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-}
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index a0166bca71..e668d5097e 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then
fi
# Defaults
-KIND_NODE_DEFAULT_VERSION=v1.31.2
+KIND_NODE_DEFAULT_VERSION=v1.32.0
K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
From 0be01557b747745e93c60375c6b9c58f4c2637cc Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 19 Dec 2024 11:35:08 +0100
Subject: [PATCH 246/836] chore(deps): update helm/kind-action action to
v1.11.0 (main) (#6364)
---
.github/workflows/continuous-integration.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index fe8629a5e5..5b15566bba 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -829,7 +829,7 @@ jobs:
uses: actions/checkout@v4
- name: Setting up KinD cluster
- uses: helm/kind-action@v1.10.0
+ uses: helm/kind-action@v1.11.0
with:
wait: "600s"
version: ${{ env.KIND_VERSION }}
From 396e3bd62a1b53ed29dd03a167b003f1d69e90cb Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 19 Dec 2024 14:12:37 +0100
Subject: [PATCH 247/836] fix(deps): update github.com/cloudnative-pg/machinery
digest to 2807bc8 (main) (#6338)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 20e25fbcee..0467d2e463 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee
- github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61
+ github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
diff --git a/go.sum b/go.sum
index 53e874f1bf..ded4172b34 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrE
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee h1:PJc4BpPu0b684BrwWzy0B5W/CSqrnUV+jv3PTrSUx8g=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee/go.mod h1:ahVFn+JzYkFfv7Iwpswu4lsuC9yK7zZupM1ssaIKPFI=
-github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs=
-github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
+github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d h1:v9IgiRYa7r+KCUxl5lCyUXdhsefZ90engPSMNLBqYmc=
+github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
From c9abaf65816b66616512a56a3ff16442a2159274 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 19 Dec 2024 14:34:46 +0100
Subject: [PATCH 248/836] test(e2e): fix a panic in publication and
subscription test (#6378)
The problem was introduced in #6320
Signed-off-by: Marco Nenciarini
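For context, a minimal standalone sketch of the failure mode (the type and
helper below are hypothetical, not the project's API): writing through a
declared-but-never-assigned nil pointer panics, while declaring a value and
passing its address — as this patch does — is safe.

    package main

    import "fmt"

    type Publication struct{ Name string }

    // fill writes into the object behind the pointer, as a typed Get would.
    func fill(into *Publication) { into.Name = "pub" }

    func main() {
        var broken *Publication // nil pointer, as in the pre-patch test
        // broken.Name = "x" or fill(broken) would panic with a nil
        // pointer dereference — the panic this patch removes.
        _ = broken

        var fixed Publication   // zero value, as in the patched test
        fill(&fixed)            // safe: &fixed is never nil
        fmt.Println(fixed.Name) // prints "pub"
    }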
---
tests/e2e/publication_subscription_test.go | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go
index 0ffa10d918..e6dccd6e66 100644
--- a/tests/e2e/publication_subscription_test.go
+++ b/tests/e2e/publication_subscription_test.go
@@ -246,8 +246,8 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
assertCreateSubscription(namespace, destinationClusterName, subManifest)
var (
- publication *apiv1.Publication
- subscription *apiv1.Subscription
+ publication apiv1.Publication
+ subscription apiv1.Subscription
)
By("setting the reclaimPolicy", func() {
publicationReclaimPolicy := apiv1.PublicationReclaimDelete
@@ -263,26 +263,24 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
- var pub apiv1.Publication
err = testsUtils.GetObject(
env,
types.NamespacedName{Namespace: namespace, Name: pubObjectName},
- &pub,
+ &publication,
)
g.Expect(err).ToNot(HaveOccurred())
publication.Spec.ReclaimPolicy = publicationReclaimPolicy
- err = env.Client.Update(env.Ctx, publication)
+ err = env.Client.Update(env.Ctx, &publication)
g.Expect(err).ToNot(HaveOccurred())
- var sub apiv1.Subscription
err = testsUtils.GetObject(
env,
types.NamespacedName{Namespace: namespace, Name: subObjectName},
- &sub,
+ &subscription,
)
g.Expect(err).ToNot(HaveOccurred())
subscription.Spec.ReclaimPolicy = subscriptionReclaimPolicy
- err = env.Client.Update(env.Ctx, subscription)
+ err = env.Client.Update(env.Ctx, &subscription)
g.Expect(err).ToNot(HaveOccurred())
}, 60, 5).Should(Succeed())
})
@@ -298,8 +296,8 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
})
By("removing the objects", func() {
- Expect(testsUtils.DeleteObject(env, publication)).To(Succeed())
- Expect(testsUtils.DeleteObject(env, subscription)).To(Succeed())
+ Expect(testsUtils.DeleteObject(env, &publication)).To(Succeed())
+ Expect(testsUtils.DeleteObject(env, &subscription)).To(Succeed())
})
By("verifying the publication reclaim policy outcome", func() {
From c59451ab820399cba5e8d1914fe6fe9651c0c16f Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 19 Dec 2024 16:04:55 +0100
Subject: [PATCH 249/836] fix(deps): update kubernetes packages to v0.32.0
(main) (#6354)
https://github.com/kubernetes/api `v0.31.3` -> `v0.32.0`
https://github.com/kubernetes/apiextensions-apiserver `v0.31.3` -> `v0.32.0`
https://github.com/kubernetes/apimachinery `v0.31.3` -> `v0.32.0`
https://github.com/kubernetes/cli-runtime `v0.31.3` -> `v0.32.0`
https://github.com/kubernetes/client-go `v0.31.3` -> `v0.32.0`
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jonathan Gonzalez V.
---
.../bases/postgresql.cnpg.io_clusters.yaml | 2 +-
.../crd/bases/postgresql.cnpg.io_poolers.yaml | 340 ++++++++++++------
go.mod | 30 +-
go.sum | 57 ++-
4 files changed, 271 insertions(+), 158 deletions(-)
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index e185082fa7..1057f16c99 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -3633,7 +3633,7 @@ spec:
not set, the implementation will apply its default routing strategy. If set
to "PreferClose", implementations should prioritize endpoints that are
topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ This is a beta field and requires enabling ServiceTrafficDistribution feature.
type: string
type:
description: |-
diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
index ac283c038f..6039e1e5ea 100644
--- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
@@ -707,7 +707,7 @@ spec:
not set, the implementation will apply its default routing strategy. If set
to "PreferClose", implementations should prioritize endpoints that are
topologically close (e.g., same zone).
- This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ This is a beta field and requires enabling ServiceTrafficDistribution feature.
type: string
type:
description: |-
@@ -1958,7 +1958,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -1973,7 +1974,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -2024,8 +2025,8 @@ spec:
- port
type: object
sleep:
- description: Sleep represents the duration that
- the container should sleep before being terminated.
+ description: Sleep represents a duration that
+ the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@@ -2038,8 +2039,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- for the backward compatibility. There are no validation of this field and
- lifecycle hooks will fail in runtime when tcp handler is specified.
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@@ -2071,7 +2072,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -2086,7 +2088,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -2137,8 +2139,8 @@ spec:
- port
type: object
sleep:
- description: Sleep represents the duration that
- the container should sleep before being terminated.
+ description: Sleep represents a duration that
+ the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@@ -2151,8 +2153,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- for the backward compatibility. There are no validation of this field and
- lifecycle hooks will fail in runtime when tcp handler is specified.
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@@ -2180,7 +2182,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -2201,8 +2204,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -2221,7 +2223,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -2289,7 +2291,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -2395,7 +2397,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -2416,8 +2419,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -2436,7 +2438,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -2504,7 +2506,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -2854,7 +2856,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -2875,8 +2878,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -2895,7 +2897,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -2963,7 +2965,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -3178,9 +3180,13 @@ spec:
options of a pod.
properties:
name:
- description: Required.
+ description: |-
+ Name is this DNS resolver option's name.
+ Required.
type: string
value:
+ description: Value is this DNS resolver option's
+ value.
type: string
type: object
type: array
@@ -3463,7 +3469,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -3478,7 +3485,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -3529,8 +3536,8 @@ spec:
- port
type: object
sleep:
- description: Sleep represents the duration that
- the container should sleep before being terminated.
+ description: Sleep represents a duration that
+ the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@@ -3543,8 +3550,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- for the backward compatibility. There are no validation of this field and
- lifecycle hooks will fail in runtime when tcp handler is specified.
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@@ -3576,7 +3583,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -3591,7 +3599,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -3642,8 +3650,8 @@ spec:
- port
type: object
sleep:
- description: Sleep represents the duration that
- the container should sleep before being terminated.
+ description: Sleep represents a duration that
+ the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@@ -3656,8 +3664,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- for the backward compatibility. There are no validation of this field and
- lifecycle hooks will fail in runtime when tcp handler is specified.
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@@ -3681,7 +3689,8 @@ spec:
description: Probes are not allowed for ephemeral containers.
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -3702,8 +3711,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -3722,7 +3730,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -3790,7 +3798,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -3884,7 +3892,8 @@ spec:
description: Probes are not allowed for ephemeral containers.
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -3905,8 +3914,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -3925,7 +3933,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -3993,7 +4001,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -4323,7 +4331,8 @@ spec:
description: Probes are not allowed for ephemeral containers.
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -4344,8 +4353,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -4364,7 +4372,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -4432,7 +4440,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -4971,7 +4979,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -4986,7 +4995,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -5037,8 +5046,8 @@ spec:
- port
type: object
sleep:
- description: Sleep represents the duration that
- the container should sleep before being terminated.
+ description: Sleep represents a duration that
+ the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@@ -5051,8 +5060,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- for the backward compatibility. There are no validation of this field and
- lifecycle hooks will fail in runtime when tcp handler is specified.
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@@ -5084,7 +5093,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -5099,7 +5109,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -5150,8 +5160,8 @@ spec:
- port
type: object
sleep:
- description: Sleep represents the duration that
- the container should sleep before being terminated.
+ description: Sleep represents a duration that
+ the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@@ -5164,8 +5174,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- for the backward compatibility. There are no validation of this field and
- lifecycle hooks will fail in runtime when tcp handler is specified.
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@@ -5193,7 +5203,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -5214,8 +5225,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -5234,7 +5244,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -5302,7 +5312,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -5408,7 +5418,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -5429,8 +5440,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -5449,7 +5459,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -5517,7 +5527,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -5867,7 +5877,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
- description: Exec specifies the action to take.
+ description: Exec specifies a command to execute
+ in the container.
properties:
command:
description: |-
@@ -5888,8 +5899,7 @@ spec:
format: int32
type: integer
grpc:
- description: GRPC specifies an action involving
- a GRPC port.
+ description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@@ -5908,7 +5918,7 @@ spec:
- port
type: object
httpGet:
- description: HTTPGet specifies the http request
+ description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@@ -5976,7 +5986,7 @@ spec:
format: int32
type: integer
tcpSocket:
- description: TCPSocket specifies an action involving
+ description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@@ -6343,6 +6353,74 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
+ resources:
+ description: |-
+ Resources is the total amount of CPU and Memory resources required by all
+ containers in the pod. It supports specifying Requests and Limits for
+ "cpu" and "memory" resource names only. ResourceClaims are not supported.
+
+ This field enables fine-grained control over resource allocation for the
+ entire pod, allowing resource sharing among containers in a pod.
+
+ This is an alpha field and requires enabling the PodLevelResources feature
+ gate.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
restartPolicy:
description: |-
Restart policy for all containers within the pod.
@@ -6467,6 +6545,32 @@ spec:
Note that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
+ seLinuxChangePolicy:
+ description: |-
+ seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
+ Valid values are "MountOption" and "Recursive".
+
+ "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+ "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ This requires all Pods that share the same volume to use the same SELinux label.
+ It is not possible to share the same volume among privileged and unprivileged Pods.
+ Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ CSIDriver instance. Other volumes are always re-labelled recursively.
+ "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+ If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ and "Recursive" for all other volumes.
+
+ This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+ All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
seLinuxOptions:
description: |-
The SELinux context to be applied to all containers.
@@ -6873,6 +6977,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@@ -6904,8 +7010,10 @@ spec:
- volumeID
type: object
azureDisk:
- description: azureDisk represents an Azure Data Disk
- mount on the host and bind mount to the pod.
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode:
@@ -6944,8 +7052,10 @@ spec:
- diskURI
type: object
azureFile:
- description: azureFile represents an Azure File Service
- mount on the host and bind mount to the pod.
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@@ -6964,8 +7074,9 @@ spec:
- shareName
type: object
cephfs:
- description: cephFS represents a Ceph FS mount on the
- host that shares a pod's lifetime
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@@ -7018,6 +7129,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@@ -7129,7 +7242,7 @@ spec:
csi:
description: csi (Container Storage Interface) represents
ephemeral storage that is handled by certain external
- CSI drivers (Beta feature).
+ CSI drivers.
properties:
driver:
description: |-
@@ -7600,6 +7713,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to
@@ -7645,9 +7759,9 @@ spec:
- driver
type: object
flocker:
- description: flocker represents a Flocker volume attached
- to a kubelet's host machine. This depends on the Flocker
- control service being running
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@@ -7663,6 +7777,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@@ -7698,7 +7814,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
- DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@@ -7722,6 +7838,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@@ -7931,9 +8048,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
- description: photonPersistentDisk represents a PhotonController
- persistent disk attached and mounted on kubelets host
- machine
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@@ -7949,8 +8066,11 @@ spec:
- pdID
type: object
portworxVolume:
- description: portworxVolume represents a portworx volume
- attached and mounted on kubelets host machine
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
properties:
fsType:
description: |-
@@ -8321,8 +8441,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
- description: quobyte represents a Quobyte mount on the
- host that shares a pod's lifetime
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@@ -8361,6 +8482,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@@ -8433,8 +8555,9 @@ spec:
- monitors
type: object
scaleIO:
- description: scaleIO represents a ScaleIO persistent
- volume attached and mounted on Kubernetes nodes.
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@@ -8567,8 +8690,9 @@ spec:
type: string
type: object
storageos:
- description: storageOS represents a StorageOS volume
- attached and mounted on Kubernetes nodes.
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@@ -8613,8 +8737,10 @@ spec:
type: string
type: object
vsphereVolume:
- description: vsphereVolume represents a vSphere volume
- attached and mounted on kubelets host machine
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
diff --git a/go.mod b/go.mod
index 0467d2e463..f0ba746aaa 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,8 @@
module github.com/cloudnative-pg/cloudnative-pg
-go 1.23
+go 1.23.0
-toolchain go1.23.3
+toolchain go1.23.4
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
@@ -40,11 +40,11 @@ require (
golang.org/x/term v0.27.0
google.golang.org/grpc v1.69.2
gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.31.3
- k8s.io/apiextensions-apiserver v0.31.3
- k8s.io/apimachinery v0.31.3
- k8s.io/cli-runtime v0.31.3
- k8s.io/client-go v0.31.3
+ k8s.io/api v0.32.0
+ k8s.io/apiextensions-apiserver v0.32.0
+ k8s.io/apimachinery v0.32.0
+ k8s.io/cli-runtime v0.32.0
+ k8s.io/client-go v0.32.0
k8s.io/utils v0.0.0-20241210054802-24370beab758
sigs.k8s.io/controller-runtime v0.19.3
sigs.k8s.io/yaml v1.4.0
@@ -65,7 +65,6 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
@@ -75,7 +74,6 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
- github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
@@ -86,7 +84,7 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/moby/spdystream v0.4.0 // indirect
+ github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -101,7 +99,6 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
- go.starlark.net v0.0.0-20240925182052-1207426daebd // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
golang.org/x/net v0.30.0 // indirect
@@ -116,11 +113,10 @@ require (
google.golang.org/protobuf v1.36.0 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/kustomize/api v0.17.3 // indirect
- sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/kustomize/api v0.18.0 // indirect
+ sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
)
diff --git a/go.sum b/go.sum
index ded4172b34..3f3e51ffa4 100644
--- a/go.sum
+++ b/go.sum
@@ -59,8 +59,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
@@ -85,8 +83,6 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
@@ -131,8 +127,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
-github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
-github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
+github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
+github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -183,8 +179,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stern/stern v1.31.0 h1:kKHVgEmIgqbC6/sFZahUeU9TbxDH+0l3l5/ornLlQLs=
github.com/stern/stern v1.31.0/go.mod h1:BfAeaPQhkMhQPTaFV81pS8YWCBmxg6IBL8fPGalt0qY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -208,8 +204,6 @@ go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4Jjx
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
-go.starlark.net v0.0.0-20240925182052-1207426daebd h1:S+EMisJOHklQxnS3kqsY8jl2y5aF0FDEdcLnOw3q22E=
-go.starlark.net v0.0.0-20240925182052-1207426daebd/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -282,37 +276,34 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWM
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
-k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
-k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE=
-k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4=
-k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
-k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI=
-k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8=
-k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
-k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
+k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
+k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
+k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0=
+k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw=
+k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
+k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c=
+k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ=
+k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
+k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo=
-k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU=
-sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc=
-sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0=
-sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
+sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
+sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
+sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
From bc55791b7ff05b322ad6ea240d0b67d6d471c1ff Mon Sep 17 00:00:00 2001
From: Gabriele Quaresima
Date: Thu, 19 Dec 2024 16:35:17 +0100
Subject: [PATCH 250/836] fix(roles): properly quote inRoles in SQL statements
(#6346)
This patch fixes an issue where the `inRoles` parameter was not properly
quoted in SQL statements, which could cause syntax errors if the role
name contains special characters.
Closes #6337
Signed-off-by: wolfox
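For illustration, a self-contained sketch of the behaviour change, using the
same pgx.Identifier helper the patch adopts (sample role names are made up;
the pgx v5 import path is assumed):

    package main

    import (
        "fmt"
        "strings"

        "github.com/jackc/pgx/v5"
    )

    func main() {
        inRoles := []string{"pg_monitoring", "role with space", `evil"role`}

        // Pre-patch: raw join; spaces or quotes in a role name break the SQL.
        fmt.Println("IN ROLE " + strings.Join(inRoles, ","))

        // Post-patch: each identifier is double-quoted, inner quotes doubled.
        quoted := make([]string, len(inRoles))
        for i, r := range inRoles {
            quoted[i] = pgx.Identifier{r}.Sanitize()
        }
        fmt.Println("IN ROLE " + strings.Join(quoted, ","))
        // IN ROLE "pg_monitoring","role with space","evil""role"
    }

The patched appendInRoleOptions applies this per-role sanitization before
joining, which is what the updated test expectations reflect.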
---
internal/management/controller/roles/postgres.go | 8 +++++++-
internal/management/controller/roles/postgres_test.go | 8 ++++----
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go
index eb1dcf913c..26c909d4b2 100644
--- a/internal/management/controller/roles/postgres.go
+++ b/internal/management/controller/roles/postgres.go
@@ -301,7 +301,13 @@ func GetParentRoles(ctx context.Context, db *sql.DB, role DatabaseRole) ([]strin
func appendInRoleOptions(role DatabaseRole, query *strings.Builder) {
if len(role.InRoles) > 0 {
- query.WriteString(fmt.Sprintf(" IN ROLE %s ", strings.Join(role.InRoles, ",")))
+ quotedInRoles := make([]string, len(role.InRoles))
+
+ for i, inRole := range role.InRoles {
+ quotedInRoles[i] = pgx.Identifier{inRole}.Sanitize()
+ }
+
+ query.WriteString(fmt.Sprintf(" IN ROLE %s ", strings.Join(quotedInRoles, ",")))
}
}
diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go
index 01f3dd1dc9..4357f62f0c 100644
--- a/internal/management/controller/roles/postgres_test.go
+++ b/internal/management/controller/roles/postgres_test.go
@@ -104,22 +104,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
}
wantedRoleExpectedCrtStmt := fmt.Sprintf(
"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
- "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring VALID UNTIL '2100-01-01 00:00:00Z'",
+ "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" VALID UNTIL '2100-01-01 00:00:00Z'",
wantedRole.Name)
wantedRoleWithPassExpectedCrtStmt := fmt.Sprintf(
"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
- "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD 'myPassword' VALID UNTIL '2100-01-01 00:00:00Z'",
+ "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD 'myPassword' VALID UNTIL '2100-01-01 00:00:00Z'",
wantedRole.Name)
wantedRoleWithoutValidUntilExpectedCrtStmt := fmt.Sprintf(
"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
- "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD 'myPassword'",
+ "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD 'myPassword'",
wantedRole.Name)
wantedRoleWithPassDeletionExpectedCrtStmt := fmt.Sprintf(
"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
- "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'",
+ "NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'",
wantedRole.Name)
wantedRoleWithDefaultConnectionLimitExpectedCrtStmt := fmt.Sprintf(
"CREATE ROLE \"%s\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION "+
From b897b44fc3e6f9afe0b19654724cd9d9d4888fcc Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 19 Dec 2024 17:35:11 +0100
Subject: [PATCH 251/836] chore: add missing labels in container images (#6377)
The operator image was missing the "maintainer" label required for
certifications and standards; it previously relied on inheriting the
label from the parent image.
Signed-off-by: Jonathan Gonzalez V.
---
Dockerfile | 6 ++++--
Dockerfile-ubi8 | 6 ++++--
Dockerfile-ubi9 | 6 ++++--
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index c96d232364..e6d787a93b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,14 +12,16 @@ ARG VERSION="dev"
ARG TARGETARCH
ENV SUMMARY="CloudNativePG Operator Container Image." \
- DESCRIPTION="This Docker image contains CloudNativePG Operator."
+ DESCRIPTION="This Docker image contains CloudNativePG Operator." \
+ MAINTAINER="CloudNativePG Contributors."
LABEL summary="$SUMMARY" \
description="$DESCRIPTION" \
io.k8s.display-name="$SUMMARY" \
io.k8s.description="$DESCRIPTION" \
name="CloudNativePG Operator" \
- vendor="CloudNativePG Contributors" \
+ vendor="$MAINTAINER" \
+ maintainer="$MAINTAINER" \
url="https://cloudnative-pg.io/" \
version="$VERSION" \
release="1"
diff --git a/Dockerfile-ubi8 b/Dockerfile-ubi8
index 4c2712de72..1aea9e40ac 100644
--- a/Dockerfile-ubi8
+++ b/Dockerfile-ubi8
@@ -3,14 +3,16 @@ ARG VERSION="dev"
ARG TARGETARCH
ENV SUMMARY="CloudNativePG Operator Container Image." \
- DESCRIPTION="This Docker image contains CloudNativePG Operator."
+ DESCRIPTION="This Docker image contains CloudNativePG Operator." \
+ MAINTAINER="CloudNativePG Contributors."
LABEL summary="$SUMMARY" \
description="$DESCRIPTION" \
io.k8s.display-name="$SUMMARY" \
io.k8s.description="$DESCRIPTION" \
name="CloudNativePG Operator" \
- vendor="CloudNativePG Contributors" \
+ vendor="$MAINTAINER" \
+ maintainer="$MAINTAINER" \
url="https://cloudnative-pg.io/" \
version="$VERSION" \
release="1"
diff --git a/Dockerfile-ubi9 b/Dockerfile-ubi9
index 74409e03ca..0d846d91b0 100644
--- a/Dockerfile-ubi9
+++ b/Dockerfile-ubi9
@@ -3,14 +3,16 @@ ARG VERSION="dev"
ARG TARGETARCH
ENV SUMMARY="CloudNativePG Operator Container Image." \
- DESCRIPTION="This Docker image contains CloudNativePG Operator."
+ DESCRIPTION="This Docker image contains CloudNativePG Operator." \
+ MAINTAINER="CloudNativePG Contributors."
LABEL summary="$SUMMARY" \
description="$DESCRIPTION" \
io.k8s.display-name="$SUMMARY" \
io.k8s.description="$DESCRIPTION" \
name="CloudNativePG Operator" \
- vendor="CloudNativePG Contributors" \
+ vendor="$MAINTAINER" \
+ maintainer="$MAINTAINER" \
url="https://cloudnative-pg.io/" \
version="$VERSION" \
release="1"
From 60ff2ccce515eaebf666c7374e589894213cc561 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 20 Dec 2024 09:41:54 +0100
Subject: [PATCH 252/836] fix(deps): update module
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to
v0.79.2 (main) (#6379)
https://github.com/prometheus-operator/prometheus-operator `v0.78.2` -> `v0.79.2`
golang.org/x/crypto `v0.28.0` -> `v0.30.0`
golang.org/x/net `v0.30.0` -> `v0.32.0`
golang.org/x/sync `v0.8.0` -> `v0.10.0`
golang.org/x/text `v0.19.0` -> `v0.21.0`
sigs.k8s.io/json `v0.0.0-20241010143419-9aa6b5e7a4b3` -> `v0.0.0-20241014173422-cfa47c3a1cc8`
sigs.k8s.io/structured-merge-diff/v4 `v4.4.2` -> `v4.5.0`
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jonathan Gonzalez V.
---
go.mod | 14 ++++++-------
go.sum | 28 +++++++++++++-------------
pkg/specs/pgbouncer/podmonitor.go | 3 ++-
pkg/specs/pgbouncer/podmonitor_test.go | 2 +-
pkg/specs/podmonitor.go | 3 ++-
pkg/specs/podmonitor_test.go | 5 +++--
6 files changed, 29 insertions(+), 26 deletions(-)
diff --git a/go.mod b/go.mod
index f0ba746aaa..fe86702085 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
github.com/mitchellh/go-ps v1.0.0
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.1
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
github.com/sethvargo/go-password v0.3.1
@@ -99,13 +99,13 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
- golang.org/x/crypto v0.28.0 // indirect
+ golang.org/x/crypto v0.30.0 // indirect
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
- golang.org/x/net v0.30.0 // indirect
+ golang.org/x/net v0.32.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
- golang.org/x/text v0.19.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
@@ -115,8 +115,8 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
- sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/api v0.18.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
)
diff --git a/go.sum b/go.sum
index 3f3e51ffa4..e9a085e58a 100644
--- a/go.sum
+++ b/go.sum
@@ -153,8 +153,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 h1:SyoVBXD/r0PntR1rprb90ClI32FSUNOCWqqTatnipHM=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 h1:DGv150w4UyxnjNHlkCw85R3+lspOxegtdnbpP2vKRrk=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
@@ -215,8 +215,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
+golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -225,15 +225,15 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -246,8 +246,8 @@ golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -297,13 +297,13 @@ k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJ
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk=
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/pkg/specs/pgbouncer/podmonitor.go b/pkg/specs/pgbouncer/podmonitor.go
index 579a8006d6..ff7962e3b9 100644
--- a/pkg/specs/pgbouncer/podmonitor.go
+++ b/pkg/specs/pgbouncer/podmonitor.go
@@ -51,8 +51,9 @@ func (c PoolerPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor {
utils.SetAsOwnedBy(&meta, c.pooler.ObjectMeta, c.pooler.TypeMeta)
+ metricsPort := "metrics"
endpoint := monitoringv1.PodMetricsEndpoint{
- Port: "metrics",
+ Port: &metricsPort,
}
if c.pooler.Spec.Monitoring != nil {
diff --git a/pkg/specs/pgbouncer/podmonitor_test.go b/pkg/specs/pgbouncer/podmonitor_test.go
index 8643955ff5..66a16fd644 100644
--- a/pkg/specs/pgbouncer/podmonitor_test.go
+++ b/pkg/specs/pgbouncer/podmonitor_test.go
@@ -79,7 +79,7 @@ var _ = Describe("PoolerPodMonitorManager", func() {
}))
Expect(podMonitor.Spec.PodMetricsEndpoints).To(HaveLen(1))
- Expect(podMonitor.Spec.PodMetricsEndpoints[0].Port).To(Equal("metrics"))
+ Expect(*podMonitor.Spec.PodMetricsEndpoints[0].Port).To(Equal("metrics"))
})
})
diff --git a/pkg/specs/podmonitor.go b/pkg/specs/podmonitor.go
index 25770e3684..4a7ab863ef 100644
--- a/pkg/specs/podmonitor.go
+++ b/pkg/specs/podmonitor.go
@@ -44,8 +44,9 @@ func (c ClusterPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor {
}
c.cluster.SetInheritedDataAndOwnership(&meta)
+ metricsPort := "metrics"
endpoint := monitoringv1.PodMetricsEndpoint{
- Port: "metrics",
+ Port: &metricsPort,
}
if c.cluster.IsMetricsTLSEnabled() {
diff --git a/pkg/specs/podmonitor_test.go b/pkg/specs/podmonitor_test.go
index 6c486e1808..b043eb5fe5 100644
--- a/pkg/specs/podmonitor_test.go
+++ b/pkg/specs/podmonitor_test.go
@@ -35,6 +35,7 @@ var _ = Describe("PodMonitor test", func() {
clusterName = "test"
clusterNamespace = "test-namespace"
)
+ metricsPort := "metrics"
assertPodMonitorCorrect := func(cluster *apiv1.Cluster, expectedEndpoint monitoringv1.PodMetricsEndpoint) {
getMetricRelabelings := func() []monitoringv1.RelabelConfig {
@@ -121,7 +122,7 @@ var _ = Describe("PodMonitor test", func() {
},
}
- expectedEndpoint := monitoringv1.PodMetricsEndpoint{Port: "metrics"}
+ expectedEndpoint := monitoringv1.PodMetricsEndpoint{Port: &metricsPort}
assertPodMonitorCorrect(&cluster, expectedEndpoint)
})
@@ -142,7 +143,7 @@ var _ = Describe("PodMonitor test", func() {
}
expectedEndpoint := monitoringv1.PodMetricsEndpoint{
- Port: "metrics",
+ Port: &metricsPort,
Scheme: "https",
TLSConfig: &monitoringv1.SafeTLSConfig{
CA: monitoringv1.SecretOrConfigMap{
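Editor's note: the temporary `metricsPort` variable above is needed because prometheus-operator v0.79 changed `PodMetricsEndpoint.Port` from `string` to `*string`. A hedged sketch of an equivalent approach using the `k8s.io/utils/ptr` helper; `podMetricsEndpoint` here is a local stand-in for the real monitoringv1 type, shown only to keep the sketch self-contained:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

// podMetricsEndpoint is a hypothetical stand-in for the relevant slice of
// monitoringv1.PodMetricsEndpoint.
type podMetricsEndpoint struct {
	Port *string
}

func main() {
	// ptr.To returns a pointer to a copy of its argument, which avoids
	// declaring the explicit temporary variable used in the patch.
	endpoint := podMetricsEndpoint{Port: ptr.To("metrics")}
	fmt.Println(*endpoint.Port) // metrics
}
```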
From d90792f87a36e1f884fd3e98eb2b6763e9a36b90 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 20 Dec 2024 11:47:34 +0100
Subject: [PATCH 253/836] chore(deps): update operator framework (main) (#6370)
https://github.com/operator-framework/operator-registry `v1.48.0` -> `v1.49.0`
https://github.com/redhat-openshift-ecosystem/openshift-preflight `1.10.2` -> `1.11.1`
---
Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index c4f7a65e3d..a80d95dfb7 100644
--- a/Makefile
+++ b/Makefile
@@ -47,8 +47,8 @@ GORELEASER_VERSION ?= v2.5.0
SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.38.0
-OPM_VERSION ?= v1.48.0
-PREFLIGHT_VERSION ?= 1.10.2
+OPM_VERSION ?= v1.49.0
+PREFLIGHT_VERSION ?= 1.11.1
OPENSHIFT_VERSIONS ?= v4.12-v4.18
ARCH ?= amd64
From 27fb8a6662dead86aec744f5ba7dc4268f15dd40 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 20 Dec 2024 14:03:16 +0100
Subject: [PATCH 254/836] fix(deps): update module github.com/onsi/ginkgo/v2 to
v2.22.1 (main) (#6391)
---
go.mod | 6 +++---
go.sum | 12 ++++++------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/go.mod b/go.mod
index fe86702085..7aad95057a 100644
--- a/go.mod
+++ b/go.mod
@@ -25,7 +25,7 @@ require (
github.com/lib/pq v1.10.9
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
- github.com/onsi/ginkgo/v2 v2.22.0
+ github.com/onsi/ginkgo/v2 v2.22.1
github.com/onsi/gomega v1.36.1
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
github.com/prometheus/client_golang v1.20.5
@@ -70,7 +70,7 @@ require (
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
+ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
@@ -107,7 +107,7 @@ require (
golang.org/x/sys v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
- golang.org/x/tools v0.26.0 // indirect
+ golang.org/x/tools v0.28.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/protobuf v1.36.0 // indirect
diff --git a/go.sum b/go.sum
index e9a085e58a..096950aade 100644
--- a/go.sum
+++ b/go.sum
@@ -71,8 +71,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -142,8 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
-github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
+github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -254,8 +254,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
From 6d2e5aa6e901a149fb4ac9b7994c1e62a4c1eb4f Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 20 Dec 2024 16:33:30 +0100
Subject: [PATCH 255/836] chore(deps): update dependency
kubernetes-csi/external-attacher to v4.8.0 (main) (#6392)
---
hack/setup-cluster.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index e668d5097e..7bd5e66072 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -30,7 +30,7 @@ CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
EXTERNAL_PROVISIONER_VERSION=v5.1.0
EXTERNAL_RESIZER_VERSION=v1.12.0
-EXTERNAL_ATTACHER_VERSION=v4.7.0
+EXTERNAL_ATTACHER_VERSION=v4.8.0
K8S_VERSION=${K8S_VERSION-}
KUBECTL_VERSION=${KUBECTL_VERSION-}
CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION}
From 98541cd3fe578fc37ffc4149c997ef61d08503fd Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 20 Dec 2024 18:08:44 +0100
Subject: [PATCH 256/836] chore: add missing kinds to `groupversion_info`
(#6390)
This patch adds the missing kinds to the groupversion_info file.
This is useful when referencing a CRD's `kind` value.
Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
api/v1/groupversion_info.go | 33 +++++++++++++++------------------
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go
index 44bab3db5b..bb665cb83b 100644
--- a/api/v1/groupversion_info.go
+++ b/api/v1/groupversion_info.go
@@ -24,24 +24,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
-var (
- // GroupVersion is group version used to register these objects
- GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}
-
- // ClusterGVK is the triple to reach Cluster resources in k8s
- ClusterGVK = schema.GroupVersionResource{
- Group: GroupVersion.Group,
- Version: GroupVersion.Version,
- Resource: "clusters",
- }
-
- // PoolerGVK is the triple to reach Pooler resources in k8s
- PoolerGVK = schema.GroupVersionResource{
- Group: GroupVersion.Group,
- Version: GroupVersion.Version,
- Resource: "poolers",
- }
-
+const (
// ClusterKind is the kind name of Clusters
ClusterKind = "Cluster"
@@ -57,6 +40,20 @@ var (
// ClusterImageCatalogKind is the kind name of the cluster-wide image catalogs
ClusterImageCatalogKind = "ClusterImageCatalog"
+ // PublicationKind is the kind name of publications
+ PublicationKind = "Publication"
+
+ // SubscriptionKind is the kind name of subscriptions
+ SubscriptionKind = "Subscription"
+
+ // DatabaseKind is the kind name of databases
+ DatabaseKind = "Database"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}
+
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
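Editor's note: a hypothetical usage example, not part of the patch, showing how the new kind constants avoid hard-coded strings when building a GroupVersionKind. The constant and `GroupVersion` are reproduced locally so the sketch compiles on its own:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Local copies of the declarations from api/v1/groupversion_info.go,
// inlined to keep this sketch self-contained.
const DatabaseKind = "Database"

var GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}

func main() {
	// WithKind combines the registered group/version with a kind name.
	gvk := GroupVersion.WithKind(DatabaseKind)
	fmt.Println(gvk) // postgresql.cnpg.io/v1, Kind=Database
}
```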
From 74507bb789923259f00c1c5ca15d140458e30fde Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Sun, 22 Dec 2024 11:15:23 +0100
Subject: [PATCH 257/836] feat: check the number of spec.schedule fields
(#5396)
The `schedule` field of `scheduledbackup.spec` can have 5 or 6 fields,
as defined by the [Go cron package
format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
This syntax can be misleading when only 5 fields are used, because the
`seconds` field is then omitted.
This patch improves the webhook to raise a warning when a schedule
specification has only 5 fields, while retaining backward compatibility.
Closes #5380
Signed-off-by: Pierrick
Signed-off-by: Leonardo Cecchi
Signed-off-by: Marco Nenciarini
Co-authored-by: Leonardo Cecchi
Co-authored-by: Marco Nenciarini
---
api/v1/scheduledbackup_funcs_test.go | 6 ++++--
api/v1/scheduledbackup_webhook.go | 20 ++++++++++++++------
api/v1/scheduledbackup_webhook_test.go | 25 +++++++++++++++++++++----
3 files changed, 39 insertions(+), 12 deletions(-)
diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go
index 9ef98a3692..d4da0915ea 100644
--- a/api/v1/scheduledbackup_funcs_test.go
+++ b/api/v1/scheduledbackup_funcs_test.go
@@ -77,7 +77,8 @@ var _ = Describe("Scheduled backup", func() {
Schedule: "* * * * * *",
},
}
- result := scheduledBackup.validate()
+ warnings, result := scheduledBackup.validate()
+ Expect(warnings).To(BeEmpty())
Expect(result).To(HaveLen(1))
Expect(result[0].Field).To(Equal("spec.online"))
})
@@ -90,7 +91,8 @@ var _ = Describe("Scheduled backup", func() {
Schedule: "* * * * * *",
},
}
- result := scheduledBackup.validate()
+ warnings, result := scheduledBackup.validate()
+ Expect(warnings).To(BeEmpty())
Expect(result).To(HaveLen(1))
Expect(result[0].Field).To(Equal("spec.onlineConfiguration"))
})
diff --git a/api/v1/scheduledbackup_webhook.go b/api/v1/scheduledbackup_webhook.go
index e1aebeedf5..30be039614 100644
--- a/api/v1/scheduledbackup_webhook.go
+++ b/api/v1/scheduledbackup_webhook.go
@@ -17,6 +17,8 @@ limitations under the License.
package v1
import (
+ "strings"
+
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/robfig/cron"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -56,13 +58,11 @@ var _ webhook.Validator = &ScheduledBackup{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *ScheduledBackup) ValidateCreate() (admission.Warnings, error) {
- var allErrs field.ErrorList
scheduledBackupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
- allErrs = append(allErrs, r.validate()...)
-
+ warnings, allErrs := r.validate()
if len(allErrs) == 0 {
- return nil, nil
+ return warnings, nil
}
return nil, apierrors.NewInvalid(
@@ -82,15 +82,23 @@ func (r *ScheduledBackup) ValidateDelete() (admission.Warnings, error) {
return nil, nil
}
-func (r *ScheduledBackup) validate() field.ErrorList {
+func (r *ScheduledBackup) validate() (admission.Warnings, field.ErrorList) {
var result field.ErrorList
+ var warnings admission.Warnings
if _, err := cron.Parse(r.GetSchedule()); err != nil {
result = append(result,
field.Invalid(
field.NewPath("spec", "schedule"),
r.Spec.Schedule, err.Error()))
+ } else if len(strings.Fields(r.Spec.Schedule)) != 6 {
+ warnings = append(
+ warnings,
+ "Schedule parameter may not have the right number of arguments "+
+ "(usually six arguments are needed)",
+ )
}
+
if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() {
result = append(result, field.Invalid(
field.NewPath("spec", "method"),
@@ -118,5 +126,5 @@ func (r *ScheduledBackup) validate() field.ErrorList {
))
}
- return result
+ return warnings, result
}
diff --git a/api/v1/scheduledbackup_webhook_test.go b/api/v1/scheduledbackup_webhook_test.go
index b31e954741..0ef5043a97 100644
--- a/api/v1/scheduledbackup_webhook_test.go
+++ b/api/v1/scheduledbackup_webhook_test.go
@@ -31,7 +31,20 @@ var _ = Describe("Validate schedule", func() {
},
}
- result := schedule.validate()
+ warnings, result := schedule.validate()
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(BeEmpty())
+ })
+
+ It("warn the user if the schedule has a wrong number of arguments", func() {
+ schedule := &ScheduledBackup{
+ Spec: ScheduledBackupSpec{
+ Schedule: "1 2 3 4 5",
+ },
+ }
+
+ warnings, result := schedule.validate()
+ Expect(warnings).To(HaveLen(1))
Expect(result).To(BeEmpty())
})
@@ -42,7 +55,8 @@ var _ = Describe("Validate schedule", func() {
},
}
- result := schedule.validate()
+ warnings, result := schedule.validate()
+ Expect(warnings).To(BeEmpty())
Expect(result).To(HaveLen(1))
})
@@ -54,7 +68,9 @@ var _ = Describe("Validate schedule", func() {
},
}
utils.SetVolumeSnapshot(true)
- result := schedule.validate()
+
+ warnings, result := schedule.validate()
+ Expect(warnings).To(BeEmpty())
Expect(result).To(BeEmpty())
})
@@ -66,7 +82,8 @@ var _ = Describe("Validate schedule", func() {
},
}
utils.SetVolumeSnapshot(false)
- result := schedule.validate()
+ warnings, result := schedule.validate()
+ Expect(warnings).To(BeEmpty())
Expect(result).To(HaveLen(1))
Expect(result[0].Field).To(Equal("spec.method"))
})
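Editor's note: the heart of the new check is a plain whitespace-split field count, applied only after `cron.Parse` has already accepted the schedule. A standalone sketch of that logic:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Five fields parse successfully, but the seconds column is missing,
	// so the schedule may not mean what the user intended.
	schedule := "1 2 3 4 5"
	if len(strings.Fields(schedule)) != 6 {
		fmt.Println("warning: schedule may not have the right number of fields" +
			" (usually six are needed)")
	}
}
```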
From c76557213d92f8b7311a0bc72d36dc505e1cf79d Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Sun, 22 Dec 2024 16:48:13 +0100
Subject: [PATCH 258/836] docs: fix genref config (#6422)
Signed-off-by: Marco Nenciarini
---
docs/config.yaml | 18 +++++++++--------
docs/src/cloudnative-pg.v1.md | 20 +++++++++++++------
...cluster-example-with-backup-scaleway.yaml} | 0
3 files changed, 24 insertions(+), 14 deletions(-)
rename docs/src/samples/{cluster-exemple-with-backup-scaleway.yaml => cluster-example-with-backup-scaleway.yaml} (100%)
diff --git a/docs/config.yaml b/docs/config.yaml
index 54ecf6e949..7b260f3bc8 100644
--- a/docs/config.yaml
+++ b/docs/config.yaml
@@ -32,14 +32,16 @@ externalPackages:
hideTypePatterns:
- "ParseError$"
- # We cannot exclude all `List$` because we declare PluginConfigurationList
- - "BackupList$"
- - "ClusterList$"
- - "ClusterImageCatalogList$"
- - "DatabaseList$"
- - "ImageCatalogList$"
- - "PoolerList$"
- - "ScheduledBackupList$"
+ # We cannot exclude all `List$` because we declare PluginConfigurationList and ExternalClusterList
+ - "\\.BackupList$"
+ - "\\.ClusterList$"
+ - "\\.ClusterImageCatalogList$"
+ - "\\.DatabaseList$"
+ - "\\.ImageCatalogList$"
+ - "\\.PoolerList$"
+ - "\\.ScheduledBackupList$"
+ - "\\.PublicationList$"
+ - "\\.SubscriptionList$"
markdownDisabled: false
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 48b53866ef..fa9d2c9242 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -229,9 +229,6 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
## Publication {#postgresql-cnpg-io-v1-Publication}
-**Appears in:**
-
-
Publication is the Schema for the publications API
@@ -303,9 +300,6 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
## Subscription {#postgresql-cnpg-io-v1-Subscription}
-**Appears in:**
-
-
Subscription is the Schema for the subscriptions API
@@ -2712,6 +2706,20 @@ storage
+## ExternalClusterList {#postgresql-cnpg-io-v1-ExternalClusterList}
+
+(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.ExternalCluster`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec)
+
+
+ExternalClusterList is a list of external clusters
+
+
+
+
## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef}
diff --git a/docs/src/samples/cluster-exemple-with-backup-scaleway.yaml b/docs/src/samples/cluster-example-with-backup-scaleway.yaml
similarity index 100%
rename from docs/src/samples/cluster-exemple-with-backup-scaleway.yaml
rename to docs/src/samples/cluster-example-with-backup-scaleway.yaml
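Editor's note: the `\.` prefixes matter because Go regular expressions are unanchored at the start, so a bare `ClusterList$` would also hide `ExternalClusterList`, which the config comment says must stay visible. A quick illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	broad := regexp.MustCompile(`ClusterList$`)
	anchored := regexp.MustCompile(`\.ClusterList$`)

	// The broad pattern would accidentally hide a type that must stay visible.
	fmt.Println(broad.MatchString("v1.ExternalClusterList")) // true
	// Requiring a package separator right before the name fixes that.
	fmt.Println(anchored.MatchString("v1.ExternalClusterList")) // false
	fmt.Println(anchored.MatchString("v1.ClusterList"))         // true
}
```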
From bef05706e1757634882b5a22dc7b4b41da650c20 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sun, 22 Dec 2024 21:28:28 +0100
Subject: [PATCH 259/836] fix(deps): update module github.com/jackc/pgx/v5 to
v5.7.2 (main) (#6416)
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index 7aad95057a..78f66c0b64 100644
--- a/go.mod
+++ b/go.mod
@@ -18,7 +18,7 @@ require (
github.com/go-logr/logr v1.4.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0
- github.com/jackc/pgx/v5 v5.7.1
+ github.com/jackc/pgx/v5 v5.7.2
github.com/jackc/puddle/v2 v2.2.2
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0
@@ -99,7 +99,7 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
- golang.org/x/crypto v0.30.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
diff --git a/go.sum b/go.sum
index 096950aade..8c96643b6a 100644
--- a/go.sum
+++ b/go.sum
@@ -89,8 +89,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
-github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
+github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI=
+github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -215,8 +215,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
-golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
From 4334d111dd56355b5a405faa1759cc662cf2a28b Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Mon, 23 Dec 2024 10:32:35 +0100
Subject: [PATCH 260/836] ci: stop testing Postgres 12 (#6425)
Signed-off-by: Marco Nenciarini
---
.github/pg_versions.json | 4 ----
.github/postgres-versions-update.py | 2 +-
2 files changed, 1 insertion(+), 5 deletions(-)
diff --git a/.github/pg_versions.json b/.github/pg_versions.json
index 119882aec3..a6a9696f2b 100644
--- a/.github/pg_versions.json
+++ b/.github/pg_versions.json
@@ -18,9 +18,5 @@
"13": [
"13.18",
"13.17"
- ],
- "12": [
- "12.22",
- "12.21"
]
}
\ No newline at end of file
diff --git a/.github/postgres-versions-update.py b/.github/postgres-versions-update.py
index 25ce0402d1..92e5219bef 100644
--- a/.github/postgres-versions-update.py
+++ b/.github/postgres-versions-update.py
@@ -21,7 +21,7 @@
from packaging import version
from subprocess import check_output
-min_supported_major = 12
+min_supported_major = 13
pg_repo_name = "cloudnative-pg/postgresql"
pg_version_re = re.compile(r"^(\d+)(?:\.\d+|beta\d+|rc\d+|alpha\d+)(-\d+)?$")
From ed9105de879f770cc3d7484a8130386b7301516e Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Mon, 23 Dec 2024 10:46:12 +0100
Subject: [PATCH 261/836] chore: remove ExternalClusterList type (#6426)
Our documentation generator is not able to handle that type alias, so
let's get rid of it.
Closes: #6427
Signed-off-by: Leonardo Cecchi
Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
.wordlist-en-custom.txt | 1 -
api/v1/cluster_funcs.go | 8 +-
api/v1/cluster_types.go | 5 +-
api/v1/zz_generated.deepcopy.go | 23 +-----
docs/config.yaml | 2 +-
docs/src/cloudnative-pg.v1.md | 81 +++++++++++++++++--
internal/cmd/manager/walrestore/cmd.go | 5 +-
internal/controller/cluster_controller.go | 5 +-
.../subscription_controller_test.go | 4 +-
9 files changed, 93 insertions(+), 41 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 42d7a6fa5b..47260d5468 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -155,7 +155,6 @@ EphemeralVolumeSource
EphemeralVolumesSizeLimit
EphemeralVolumesSizeLimitConfiguration
ExternalCluster
-ExternalClusterList
FQDN
Fei
Filesystem
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index 3ffba1a3da..b0345cdc16 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -82,13 +82,13 @@ func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []stri
return pluginNames
}
-// GetEnabledPluginNames gets the name of the plugins that are
+// GetExternalClustersEnabledPluginNames gets the name of the plugins that are
// involved in the reconciliation of this external cluster list. This
// list is usually composed by the plugins that need to be active to
// recover data from the external clusters.
-func (externalClusterList ExternalClusterList) GetEnabledPluginNames() (result []string) {
- pluginNames := make([]string, 0, len(externalClusterList))
- for _, externalCluster := range externalClusterList {
+func GetExternalClustersEnabledPluginNames(externalClusters []ExternalCluster) (result []string) {
+ pluginNames := make([]string, 0, len(externalClusters))
+ for _, externalCluster := range externalClusters {
if externalCluster.PluginConfiguration != nil {
pluginNames = append(pluginNames, externalCluster.PluginConfiguration.Name)
}
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index 2814f18cb6..ae6e6a181e 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -422,7 +422,7 @@ type ClusterSpec struct {
// The list of external clusters which are used in the configuration
// +optional
- ExternalClusters ExternalClusterList `json:"externalClusters,omitempty"`
+ ExternalClusters []ExternalCluster `json:"externalClusters,omitempty"`
// The instances' log level, one of the following values: error, warning, info (default), debug, trace
// +kubebuilder:default:=info
@@ -2060,9 +2060,6 @@ type ClusterMonitoringTLSConfiguration struct {
Enabled bool `json:"enabled,omitempty"`
}
-// ExternalClusterList is a list of external clusters
-type ExternalClusterList []ExternalCluster
-
// ExternalCluster represents the connection parameters to an
// external cluster which is used in the other sections of the configuration
type ExternalCluster struct {
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index fbaec944e2..8c6a2a71e6 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -790,7 +790,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
}
if in.ExternalClusters != nil {
in, out := &in.ExternalClusters, &out.ExternalClusters
- *out = make(ExternalClusterList, len(*in))
+ *out = make([]ExternalCluster, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1258,27 +1258,6 @@ func (in *ExternalCluster) DeepCopy() *ExternalCluster {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in ExternalClusterList) DeepCopyInto(out *ExternalClusterList) {
- {
- in := &in
- *out = make(ExternalClusterList, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalClusterList.
-func (in ExternalClusterList) DeepCopy() ExternalClusterList {
- if in == nil {
- return nil
- }
- out := new(ExternalClusterList)
- in.DeepCopyInto(out)
- return *out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageCatalog) DeepCopyInto(out *ImageCatalog) {
*out = *in
diff --git a/docs/config.yaml b/docs/config.yaml
index 7b260f3bc8..9717ffa456 100644
--- a/docs/config.yaml
+++ b/docs/config.yaml
@@ -32,7 +32,7 @@ externalPackages:
hideTypePatterns:
- "ParseError$"
- # We cannot exclude all `List$` because we declare PluginConfigurationList and ExternalClusterList
+ # We cannot exclude all `List$` because we declare PluginConfigurationList
- "\\.BackupList$"
- "\\.ClusterList$"
- "\\.ClusterImageCatalogList$"
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index fa9d2c9242..3db291f4cd 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -1845,7 +1845,7 @@ it can be with a switchover (switchover) or in-place (restart
externalClusters
-ExternalClusterList
+[]ExternalCluster
The list of external clusters which are used in the configuration
@@ -2706,19 +2706,89 @@ storage
-## ExternalClusterList {#postgresql-cnpg-io-v1-ExternalClusterList}
+## ExternalCluster {#postgresql-cnpg-io-v1-ExternalCluster}
-(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.ExternalCluster`)
**Appears in:**
- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec)
-ExternalClusterList is a list of external clusters
-
+ExternalCluster represents the connection parameters to an
+external cluster which is used in the other sections of the configuration
+
+Field Description
+
+name [Required]
+string
+
+
+ The server name, required
+
+
+connectionParameters
+map[string]string
+
+
+ The list of connection parameters, such as dbname, host, username, etc
+
+
+sslCert
+core/v1.SecretKeySelector
+
+
+ The reference to an SSL certificate to be used to connect to this
+instance
+
+
+sslKey
+core/v1.SecretKeySelector
+
+
+ The reference to an SSL private key to be used to connect to this
+instance
+
+
+sslRootCert
+core/v1.SecretKeySelector
+
+
+ The reference to an SSL CA public key to be used to connect to this
+instance
+
+
+password
+core/v1.SecretKeySelector
+
+
+ The reference to the password to be used to connect to the server.
+If a password is provided, CloudNativePG creates a PostgreSQL
+passfile at /controller/external/NAME/pass (where "NAME" is the
+cluster's name). This passfile is automatically referenced in the
+connection string when establishing a connection to the remote
+PostgreSQL server from the current PostgreSQL Cluster. This ensures
+secure and efficient password management for external clusters.
+
+
+barmanObjectStore
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration
+
+
+ The configuration for the barman-cloud tool suite
+
+
+plugin [Required]
+PluginConfiguration
+
+
+ The configuration of the plugin that is taking care
+of WAL archiving and backups for this external cluster
+
+
+
+
## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef}
@@ -3616,6 +3686,7 @@ the operator calls PgBouncer's PAUSE and RESUME comman
**Appears in:**
+- [ExternalCluster](#postgresql-cnpg-io-v1-ExternalCluster)
PluginConfiguration specifies a plugin that need to be loaded for this
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index 4accc2a023..8b1835a271 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -262,7 +262,10 @@ func restoreWALViaPlugins(
availablePluginNamesSet := stringset.From(availablePluginNames)
enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
- enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...)
+ enabledPluginNames = append(
+ enabledPluginNames,
+ apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)...,
+ )
enabledPluginNamesSet := stringset.From(enabledPluginNames)
client, err := pluginClient.WithPlugins(
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 6194505985..958f98f1bd 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -176,7 +176,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
// Load the plugins required to bootstrap and reconcile this cluster
enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
- enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...)
+ enabledPluginNames = append(
+ enabledPluginNames,
+ apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)...,
+ )
pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second)
defer cancelPluginLoading()
diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go
index f6afdc0c4e..f699324805 100644
--- a/internal/management/controller/subscription_controller_test.go
+++ b/internal/management/controller/subscription_controller_test.go
@@ -69,8 +69,8 @@ var _ = Describe("Managed subscription controller tests", func() {
TargetPrimary: "cluster-example-1",
},
Spec: apiv1.ClusterSpec{
- ExternalClusters: apiv1.ExternalClusterList{
- apiv1.ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
+ {
Name: "cluster-other",
ConnectionParameters: map[string]string{
"host": "localhost",
From 7f8260913ca1f306e81bf3131e46c7fe5ca5cc01 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Mon, 23 Dec 2024 11:20:01 +0100
Subject: [PATCH 262/836] chore: remove PluginConfigurationList type (#6431)
Our documentation generator is not able to cope with that type alias,
so let's remove it.
Closes: #6430
Signed-off-by: Leonardo Cecchi
---
.wordlist-en-custom.txt | 1 -
api/v1/cluster_funcs.go | 4 ++--
api/v1/cluster_types.go | 6 +----
api/v1/zz_generated.deepcopy.go | 23 +------------------
docs/config.yaml | 1 -
docs/src/cloudnative-pg.v1.md | 19 +++------------
internal/cmd/manager/walrestore/cmd.go | 2 +-
internal/controller/backup_controller.go | 6 ++++-
internal/controller/cluster_controller.go | 2 +-
pkg/management/postgres/archiver/archiver.go | 3 ++-
.../postgres/webserver/plugin_backup.go | 6 ++++-
11 files changed, 21 insertions(+), 52 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 47260d5468..3d1524d0ad 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -304,7 +304,6 @@ PgBouncerSecrets
PgBouncerSecretsVersions
PgBouncerSpec
Philippe
-PluginConfigurationList
PluginStatus
PoLA
PodAffinity
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index b0345cdc16..a116aee695 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -70,9 +70,9 @@ func (o OnlineConfiguration) GetImmediateCheckpoint() bool {
return *o.ImmediateCheckpoint
}
-// GetEnabledPluginNames gets the name of the plugins that are involved
+// GetPluginConfigurationEnabledPluginNames gets the name of the plugins that are involved
// in the reconciliation of this cluster
-func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []string) {
+func GetPluginConfigurationEnabledPluginNames(pluginList []PluginConfiguration) (result []string) {
pluginNames := make([]string, 0, len(pluginList))
for _, pluginDeclaration := range pluginList {
if pluginDeclaration.IsEnabled() {
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index ae6e6a181e..57dae17184 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -475,7 +475,7 @@ type ClusterSpec struct {
// The plugins configuration, containing
// any plugin to be loaded with the corresponding configuration
// +optional
- Plugins PluginConfigurationList `json:"plugins,omitempty"`
+ Plugins []PluginConfiguration `json:"plugins,omitempty"`
// The configuration of the probes to be injected
// in the PostgreSQL Pods.
@@ -534,10 +534,6 @@ type Probe struct {
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}
-// PluginConfigurationList represent a set of plugin with their
-// configuration parameters
-type PluginConfigurationList []PluginConfiguration
-
const (
// PhaseSwitchover when a cluster is changing the primary node
PhaseSwitchover = "Switchover in progress"
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 8c6a2a71e6..b4b9d5b295 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -838,7 +838,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
}
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
- *out = make(PluginConfigurationList, len(*in))
+ *out = make([]PluginConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -1860,27 +1860,6 @@ func (in *PluginConfiguration) DeepCopy() *PluginConfiguration {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in PluginConfigurationList) DeepCopyInto(out *PluginConfigurationList) {
- {
- in := &in
- *out = make(PluginConfigurationList, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfigurationList.
-func (in PluginConfigurationList) DeepCopy() PluginConfigurationList {
- if in == nil {
- return nil
- }
- out := new(PluginConfigurationList)
- in.DeepCopyInto(out)
- return *out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginStatus) DeepCopyInto(out *PluginStatus) {
*out = *in
diff --git a/docs/config.yaml b/docs/config.yaml
index 9717ffa456..94e4522c2a 100644
--- a/docs/config.yaml
+++ b/docs/config.yaml
@@ -32,7 +32,6 @@ externalPackages:
hideTypePatterns:
- "ParseError$"
- # We cannot exclude all `List$` because we declare PluginConfigurationList
- "\\.BackupList$"
- "\\.ClusterList$"
- "\\.ClusterImageCatalogList$"
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 3db291f4cd..1734270290 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -1919,7 +1919,7 @@ development/staging purposes.
plugins
-PluginConfigurationList
+[]PluginConfiguration
The plugins configuration, containing
@@ -3686,6 +3686,8 @@ the operator calls PgBouncer's PAUSE and RESUME comman
**Appears in:**
+- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec)
+
- [ExternalCluster](#postgresql-cnpg-io-v1-ExternalCluster)
@@ -3720,21 +3722,6 @@ cluster to be reconciled
-## PluginConfigurationList {#postgresql-cnpg-io-v1-PluginConfigurationList}
-
-(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.PluginConfiguration`)
-
-**Appears in:**
-
-- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec)
-
-
-PluginConfigurationList represent a set of plugin with their
-configuration parameters
-
-
-
-
## PluginStatus {#postgresql-cnpg-io-v1-PluginStatus}
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index 8b1835a271..403dedb063 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -261,7 +261,7 @@ func restoreWALViaPlugins(
availablePluginNamesSet := stringset.From(availablePluginNames)
- enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
+ enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)
enabledPluginNames = append(
enabledPluginNames,
apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)...,
diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go
index 29c6aea6f9..5b5906bccd 100644
--- a/internal/controller/backup_controller.go
+++ b/internal/controller/backup_controller.go
@@ -136,7 +136,11 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
}
// Load the required plugins
- pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...)
+ pluginClient, err := cnpgiClient.WithPlugins(
+ ctx,
+ r.Plugins,
+ apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)...,
+ )
if err != nil {
contextLogger.Error(err, "Error loading plugins, retrying")
return ctrl.Result{}, err
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 958f98f1bd..bc46d91bae 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -175,7 +175,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
ctx = cluster.SetInContext(ctx)
// Load the plugins required to bootstrap and reconcile this cluster
- enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames()
+ enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)
enabledPluginNames = append(
enabledPluginNames,
apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)...,
diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go
index ccf24efd36..9e6feed0a8 100644
--- a/pkg/management/postgres/archiver/archiver.go
+++ b/pkg/management/postgres/archiver/archiver.go
@@ -264,7 +264,8 @@ func archiveWALViaPlugins(
defer plugins.Close()
availablePluginNamesSet := stringset.From(availablePluginNames)
- enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
+ enabledPluginNamesSet := stringset.From(
+ apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins))
client, err := pluginClient.WithPlugins(
ctx,
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
index 2e6f58f5b6..f694f58716 100644
--- a/pkg/management/postgres/webserver/plugin_backup.go
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -90,7 +90,11 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
}
defer plugins.Close()
- cli, err := pluginClient.WithPlugins(ctx, plugins, b.Cluster.Spec.Plugins.GetEnabledPluginNames()...)
+ cli, err := pluginClient.WithPlugins(
+ ctx,
+ plugins,
+ apiv1.GetPluginConfigurationEnabledPluginNames(b.Cluster.Spec.Plugins)...,
+ )
if err != nil {
b.markBackupAsFailed(ctx, err)
return
From 9ae7634e72a222d18eec8cdbea672a1c79eca914 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Mon, 23 Dec 2024 11:44:57 +0100
Subject: [PATCH 263/836] docs: Release notes for 1.25.0, 1.24.2, 1.23.6
(#6424)
Closes #6420
Signed-off-by: Marco Nenciarini
Signed-off-by: Gabriele Bartolini
Co-authored-by: Gabriele Bartolini
---
.wordlist-en-custom.txt | 1 +
docs/src/backup.md | 5 +++++
docs/src/preview_version.md | 2 ++
docs/src/recovery.md | 7 ++++++-
docs/src/release_notes.md | 5 +++--
docs/src/release_notes/v1.25.md | 27 ++++++++++++++++++++++++---
docs/src/supported_releases.md | 20 ++++++++++----------
7 files changed, 51 insertions(+), 16 deletions(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 3d1524d0ad..16a65fde9c 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -964,6 +964,7 @@ minKubeVersion
minSyncReplicas
minikube
minio
+misconfigurations
mmap
monitoringconfiguration
mountPath
diff --git a/docs/src/backup.md b/docs/src/backup.md
index 4c3f8cb172..cffda0e7fa 100644
--- a/docs/src/backup.md
+++ b/docs/src/backup.md
@@ -41,6 +41,11 @@ On the other hand, CloudNativePG supports two ways to store physical base backup
the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html)
that provide snapshotting capabilities.
+!!! Info
+ Starting with version 1.25, CloudNativePG includes experimental support for
+ backup and recovery using plugins, such as the
+ [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud).
+
## WAL archive
The WAL archive in PostgreSQL is at the heart of **continuous backup**, and it
diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md
index 8f354f67ae..d2e2702a9f 100644
--- a/docs/src/preview_version.md
+++ b/docs/src/preview_version.md
@@ -35,6 +35,7 @@ are not backwards compatible and could be removed entirely.
There are currently no preview versions available.
+
diff --git a/docs/src/recovery.md b/docs/src/recovery.md
index 1e0a9b931a..e53db596ba 100644
--- a/docs/src/recovery.md
+++ b/docs/src/recovery.md
@@ -23,6 +23,11 @@ WAL files are pulled from the defined *recovery object store*.
Base backups can be taken either on object stores or using volume snapshots.
+!!! Info
+ Starting with version 1.25, CloudNativePG includes experimental support for
+ backup and recovery using plugins, such as the
+ [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud).
+
You can achieve recovery from a *recovery object store* in two ways:
- We recommend using a recovery object store, that is, a backup of another cluster
@@ -64,7 +69,7 @@ metadata:
name: cluster-restore
spec:
[...]
-
+
superuserSecret:
name: superuser-secret
diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md
index fe2a723507..71c503fb91 100644
--- a/docs/src/release_notes.md
+++ b/docs/src/release_notes.md
@@ -2,15 +2,16 @@
History of user-visible changes for CloudNativePG, classified for each minor release.
-- [CloudNativePG 1.25 - Release Candidate](release_notes/v1.25.md)
+
+- [CloudNativePG 1.25](release_notes/v1.25.md)
- [CloudNativePG 1.24](release_notes/v1.24.md)
-- [CloudNativePG 1.23](release_notes/v1.23.md)
For information on the community support policy for CloudNativePG, please
refer to ["Supported releases"](supported_releases.md).
Older releases:
+- [CloudNativePG 1.23](release_notes/v1.23.md)
- [CloudNativePG 1.22](release_notes/old/v1.22.md)
- [CloudNativePG 1.21](release_notes/old/v1.21.md)
- [CloudNativePG 1.20](release_notes/old/v1.20.md)
diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md
index 4996532171..0e8ca6b85a 100644
--- a/docs/src/release_notes/v1.25.md
+++ b/docs/src/release_notes/v1.25.md
@@ -6,9 +6,9 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25)
on the release branch in GitHub.
-## Version 1.25.0-rc1
+## Version 1.25.0
-**Release Date:** December 9, 2024
+**Release Date:** December 23, 2024
### Features
@@ -20,6 +20,17 @@ on the release branch in GitHub.
for declarative management of PostgreSQL logical replication. These simplify
replication setup and facilitate online migrations to CloudNativePG. (#5329)
+- **Experimental Support for CNPG-I**: Introducing CNPG-I (CloudNativePG
+ Interface), a standardized framework designed to extend CloudNativePG
+ functionality through third-party plugins and foster the growth of the CNPG
+ ecosystem.
+ The [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) serves as a live
+ example, illustrating how plugins can be developed to enhance backup and
+ recovery workflows. Although CNPG-I support is currently experimental, it
+ offers a powerful approach to extending CloudNativePG without modifying the
+ operator’s core code—akin to PostgreSQL extensions. We welcome community
+ feedback and contributions to shape this exciting new capability.
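+  As a rough sketch of the idea, a cluster opts into a plugin through its
+  specification, assuming the `plugins` stanza that CNPG-I adds to the
+  Cluster spec; the plugin name and parameter below are placeholders taken
+  from the Barman Cloud plugin project:
+
+  ```yaml
+  apiVersion: postgresql.cnpg.io/v1
+  kind: Cluster
+  metadata:
+    name: cluster-example
+  spec:
+    instances: 3
+    storage:
+      size: 1Gi
+    plugins:
+      # Placeholder plugin name and parameters (Barman Cloud plugin)
+      - name: barman-cloud.cloudnative-pg.io
+        parameters:
+          barmanObjectName: minio-store
+  ```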
+
### Enhancements
- Add the `dataDurability` option to the `.spec.postgresql.synchronous` stanza,
@@ -34,6 +45,8 @@ on the release branch in GitHub.
larger deployments out of the box. (#5678)
- Add the `cnpg.io/userType` label to secrets generated for predefined users,
specifically `superuser` and `app`. (#4392)
+- Improve validation for the `spec.schedule` field in ScheduledBackups,
+  raising warnings for potential misconfigurations; a schedule sketch follows
+  this list. (#5396)
- `cnpg` plugin:
- Enhance the `backup` command to support plugins. (#6045)
- Honor the `User-Agent` header in HTTP requests with the API server. (#6153)
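+
+For illustration, a minimal `ScheduledBackup` with a well-formed schedule in
+the six-field cron format (with a leading seconds field) that CloudNativePG
+expects:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-nightly
+spec:
+  # Six fields: seconds minutes hours day-of-month month day-of-week
+  schedule: "0 0 2 * * *"
+  cluster:
+    name: cluster-example
+```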
@@ -48,6 +61,8 @@ on the release branch in GitHub.
all previously generated `PersistentVolumeClaims` are missing. (#6170)
- Fix the parsing of the `synchronous_standby_names` GUC when
  `.spec.postgresql.synchronous.method` is set to `first`; a stanza sketch
  follows this list. (#5955)
+- Resolve a potential race condition when patching certain conditions
+  in CRD statuses, improving reliability in concurrent updates. (#6328)
- Correct role changes to apply at the transaction level instead of the
database context. (#6064)
- Remove the `primary_slot_name` definition from the `override.conf` file on
@@ -57,13 +72,19 @@ on the release branch in GitHub.
from within the container. (#6247)
- Remove unnecessary updates to the Cluster status when verifying changes in
the image catalog. (#6277)
+- Prevent panic during recovery from an external server without proper backup
+ configuration. (#6300)
+- Resolve a key collision issue in structured logs, where the `name` field was
+  inconsistently used to log two distinct values. (#6324)
+- Ensure proper quoting of the `inRoles` field in SQL statements to prevent
+  syntax errors in generated SQL during role management. (#6346)
- `cnpg` plugin:
- Ensure the `kubectl` context is properly passed in the `psql` command. (#6257)
- Avoid displaying physical backups block when empty with `status` command. (#5998)
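+
+For reference, a minimal sketch of the stanza involved in the
+`synchronous_standby_names` fix above; `method` is the field named in the
+fix, and `number` is its companion field in the same stanza:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  [...]
+  postgresql:
+    synchronous:
+      # With `first`, the first `number` standbys in order are synchronous
+      method: first
+      number: 1
+```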
### Supported Versions
-- **Kubernetes**: 1.31, 1.30, and 1.29
+- **Kubernetes**: 1.32, 1.31, 1.30, and 1.29
- **PostgreSQL**: 17, 16, 15, 14, and 13
- Default image: PostgreSQL 17.2
- Officially dropped support for PostgreSQL 12
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index afdffb9ebd..446ed26f99 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -80,11 +80,11 @@ Git tags for versions are prefixed with `v`.
## Support status of CloudNativePG releases
-| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
-|-----------------|----------------------|----------------|---------------------|-------------------------------|---------------------------|-----------------------------|
-| 1.25.x | No (RC) | Dec XX, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 (!) | 1.27, 1.28 | 13 - 17 |
-| 1.24.x | Yes | Aug 22, 2024 | Feb XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
-| main | No, development only | | | | | 13 - 17 |
+| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
+|-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------|
+| 1.25.x | Yes | Dec 23, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 |
+| 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
+| main | No, development only | | | | | 13 - 17 |
@@ -119,11 +119,11 @@ version of PostgreSQL, we might not be able to help you.
## Upcoming releases
-| Version | Release date | End of life |
-|-----------------|-----------------------|---------------------------|
-| 1.25.0 | Dec, 2024 | May/Jun, 2025 |
-| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 |
-| 1.27.0 | Jun, 2025 | Dec, 2025 |
+| Version | Release date | End of life |
+|---------|--------------|---------------|
+| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 |
+| 1.27.0 | Jun, 2025 | Dec, 2025 |
+| 1.28.0  | Sep, 2025    | Mar/Apr, 2026 |
!!! Note
Feature freeze occurs 1-2 weeks before the release, at which point a
From 55a3137eabef3daf1ca4609121087dd6723c43e1 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Mon, 23 Dec 2024 11:59:52 +0100
Subject: [PATCH 264/836] docs: Release notes for 1.24.2 and 1.23.6 (#6438)
Relates #6420
Signed-off-by: Gabriele Bartolini
---
docs/src/release_notes/v1.23.md | 51 +++++++++++++++++++++++++++++++++
docs/src/release_notes/v1.24.md | 46 +++++++++++++++++++++++++++++
2 files changed, 97 insertions(+)
diff --git a/docs/src/release_notes/v1.23.md b/docs/src/release_notes/v1.23.md
index 8aaf2f773f..1247a53955 100644
--- a/docs/src/release_notes/v1.23.md
+++ b/docs/src/release_notes/v1.23.md
@@ -6,6 +6,57 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.23)
on the release branch in GitHub.
+## Version 1.23.6
+
+**Release Date:** December 23, 2024
+
+!!! Warning
+ This is the final release in the 1.23.x series.
+    Users are strongly encouraged to upgrade to a newer minor version, as 1.23
+    has reached its end of life.
+
+### Enhancements
+
+- Enable customization of startup, liveness, and readiness probes through the
+  `.spec.probes` stanza; a sketch follows this list. (#6266)
+- Add the `cnpg.io/userType` label to secrets generated for predefined users,
+ specifically `superuser` and `app`. (#4392)
+- Improve validation for the `spec.schedule` field in ScheduledBackups,
+  raising warnings for potential misconfigurations. (#5396)
+- `cnpg` plugin:
+ - Honor the `User-Agent` header in HTTP requests with the API server. (#6153)
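+
+For illustration, a minimal sketch of the `.spec.probes` stanza; the knob
+names below mirror the core Kubernetes probe timing fields and are
+assumptions to be checked against the API reference:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  [...]
+  probes:
+    readiness:
+      # Assumed knobs, mirroring core Kubernetes probe settings
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      failureThreshold: 3
+```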
+
+### Bug Fixes
+
+- Ensure the former primary flushes its WAL file queue to the archive before
+ re-synchronizing as a replica, reducing recovery times and enhancing data
+ consistency during failovers. (#6141)
+- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265)
+- Update the operator to set the cluster phase to `Unrecoverable` when
+ all previously generated `PersistentVolumeClaims` are missing. (#6170)
+- Fix the parsing of the `synchronous_standby_names` GUC when
+ `.spec.postgresql.synchronous.method` is set to `first`. (#5955)
+- Resolve a potential race condition when patching certain conditions
+  in CRD statuses, improving reliability in concurrent updates. (#6328)
+- Correct role changes to apply at the transaction level instead of the
+ database context. (#6064)
+- Remove the `primary_slot_name` definition from the `override.conf` file on
+ the primary to ensure it is always empty. (#6219)
+- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods
+ to enable seamless access to the `pgbouncer` virtual database using `psql`
+ from within the container. (#6247)
+- Remove unnecessary updates to the Cluster status when verifying changes in
+ the image catalog. (#6277)
+- Prevent panic during recovery from an external server without proper backup
+ configuration. (#6300)
+- Resolve a key collision issue in structured logs, where the `name` field was
+  inconsistently used to log two distinct values. (#6324)
+- Ensure proper quoting of the `inRoles` field in SQL statements to prevent
+  syntax errors in generated SQL during role management. (#6346)
+- `cnpg` plugin:
+ - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257)
+ - Avoid displaying physical backups block when empty with `status` command. (#5998)
+
## Version 1.23.5
**Release date:** Oct 16, 2024
diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md
index 78182f8180..fdb59d023b 100644
--- a/docs/src/release_notes/v1.24.md
+++ b/docs/src/release_notes/v1.24.md
@@ -6,6 +6,52 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24)
on the release branch in GitHub.
+## Version 1.24.2
+
+**Release Date:** December 23, 2024
+
+### Enhancements
+
+- Enable customization of startup, liveness, and readiness probes through the
+ `.spec.probes` stanza. (#6266)
+- Add the `cnpg.io/userType` label to secrets generated for predefined users,
+ specifically `superuser` and `app`. (#4392)
+- Improve validation for the `spec.schedule` field in ScheduledBackups,
+  raising warnings for potential misconfigurations. (#5396)
+- `cnpg` plugin:
+ - Honor the `User-Agent` header in HTTP requests with the API server. (#6153)
+
+### Bug Fixes
+
+- Ensure the former primary flushes its WAL file queue to the archive before
+ re-synchronizing as a replica, reducing recovery times and enhancing data
+ consistency during failovers. (#6141)
+- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265)
+- Update the operator to set the cluster phase to `Unrecoverable` when
+ all previously generated `PersistentVolumeClaims` are missing. (#6170)
+- Fix the parsing of the `synchronous_standby_names` GUC when
+ `.spec.postgresql.synchronous.method` is set to `first`. (#5955)
+- Resolve a potential race condition when patching certain conditions
+  in CRD statuses, improving reliability in concurrent updates. (#6328)
+- Correct role changes to apply at the transaction level instead of the
+ database context. (#6064)
+- Remove the `primary_slot_name` definition from the `override.conf` file on
+ the primary to ensure it is always empty. (#6219)
+- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods
+ to enable seamless access to the `pgbouncer` virtual database using `psql`
+ from within the container. (#6247)
+- Remove unnecessary updates to the Cluster status when verifying changes in
+ the image catalog. (#6277)
+- Prevent panic during recovery from an external server without proper backup
+ configuration. (#6300)
+- Resolve a key collision issue in structured logs, where the `name` field was
+  inconsistently used to log two distinct values. (#6324)
+- Ensure proper quoting of the `inRoles` field in SQL statements to prevent
+  syntax errors in generated SQL during role management. (#6346)
+- `cnpg` plugin:
+ - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257)
+ - Avoid displaying physical backups block when empty with `status` command. (#5998)
+
## Version 1.24.1
**Release date:** Oct 16, 2024
From fb81a49d909db8616a8a47aab94e05a62ca6d50b Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Mon, 23 Dec 2024 14:11:51 +0100
Subject: [PATCH 265/836] docs: upgrade info to 1.25.0 (#6442)
Signed-off-by: Gabriele Bartolini
---
docs/src/installation_upgrade.md | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index 182ae94d44..cbc2580640 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -251,23 +251,24 @@ When versions are not directly upgradable, the old version needs to be
removed before installing the new one. This won't affect user data but
only the operator itself.
-
-
-### Upgrading to 1.24 from a previous minor version
!!! Warning
Every time you are upgrading to a higher minor release, make sure you
go through the release notes and upgrade instructions of all the
intermediate minor releases. For example, if you want to move
- from 1.22.x to 1.24, make sure you go through the release notes
- and upgrade instructions for 1.23 and 1.24.
+ from 1.23.x to 1.25, make sure you go through the release notes
+ and upgrade instructions for 1.24 and 1.25.
+
+No changes to existing 1.24 cluster configurations are required when upgrading
+to 1.25.
+
+### Upgrading to 1.24 from a previous minor version
#### From Replica Clusters to Distributed Topology
From bad5a251642655399eca392abf5d981668fbd8cc Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Mon, 23 Dec 2024 14:18:46 +0100
Subject: [PATCH 266/836] Version tag to 1.25.0 (#6443)
Signed-off-by: Gabriele Bartolini
Co-authored-by: Gabriele Bartolini
---
docs/src/installation_upgrade.md | 4 +-
docs/src/kubectl-plugin.md | 30 +-
pkg/versions/versions.go | 6 +-
releases/cnpg-1.25.0.yaml | 17771 +++++++++++++++++++++++++++++
4 files changed, 17791 insertions(+), 20 deletions(-)
create mode 100644 releases/cnpg-1.25.0.yaml
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index cbc2580640..5acbdbd854 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -7,12 +7,12 @@
The operator can be installed like any other resource in Kubernetes,
through a YAML manifest applied via `kubectl`.
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml)
+You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.0.yaml)
for this minor release as follows:
```sh
kubectl apply --server-side -f \
- https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml
+ https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.0.yaml
```
You can verify that with:
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index 45801e223c..d001a3397e 100644
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -30,11 +30,11 @@ them in your systems.
#### Debian packages
-For example, let's install the 1.25.0-rc1 release of the plugin, for an Intel based
+For example, let's install the 1.25.0 release of the plugin, for an Intel based
64 bit server. First, we download the right `.deb` file.
```sh
-wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.deb \
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/kubectl-cnpg_1.25.0_linux_x86_64.deb \
--output-document kube-plugin.deb
```
@@ -45,17 +45,17 @@ $ sudo dpkg -i kube-plugin.deb
Selecting previously unselected package cnpg.
(Reading database ... 6688 files and directories currently installed.)
Preparing to unpack kube-plugin.deb ...
-Unpacking cnpg (1.25.0-rc1) ...
-Setting up cnpg (1.25.0-rc1) ...
+Unpacking cnpg (1.25.0) ...
+Setting up cnpg (1.25.0) ...
```
#### RPM packages
-As in the example for `.rpm` packages, let's install the 1.25.0-rc1 release for an
+As in the example for `.deb` packages, let's install the 1.25.0 release for an
Intel 64 bit machine. Note the `--output` flag to provide a file name.
```sh
-curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.rpm \
+curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/kubectl-cnpg_1.25.0_linux_x86_64.rpm \
--output kube-plugin.rpm
```
@@ -69,7 +69,7 @@ Dependencies resolved.
Package Architecture Version Repository Size
====================================================================================================
Installing:
- cnpg x86_64 1.25.0-rc1-1 @commandline 20 M
+ cnpg x86_64 1.25.0-1 @commandline 20 M
Transaction Summary
====================================================================================================
@@ -277,9 +277,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00
Instances status
Name Current LSN Replication role Status QoS Manager Version Node
---- ----------- ---------------- ------ --- --------------- ----
-sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker
-sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2
-sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker
+sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0 k8s-eu-worker
+sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2
+sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker
```
If you require more detailed status information, use the `--verbose` option (or
@@ -333,9 +333,9 @@ sandbox-primary primary 1 1 1
Instances status
Name Current LSN Replication role Status QoS Manager Version Node
---- ----------- ---------------- ------ --- --------------- ----
-sandbox-1 0/6053720 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker
-sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2
-sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker
+sandbox-1 0/6053720 Primary OK BestEffort 1.25.0 k8s-eu-worker
+sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2
+sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker
```
With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can
@@ -558,12 +558,12 @@ Archive: report_operator_.zip
```output
====== Begin of Previous Log =====
-2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
====== End of Previous Log =====
-2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
```
diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go
index 91f8dcc30e..c4b1c95414 100644
--- a/pkg/versions/versions.go
+++ b/pkg/versions/versions.go
@@ -20,13 +20,13 @@ package versions
const (
// Version is the version of the operator
- Version = "1.25.0-rc1"
+ Version = "1.25.0"
// DefaultImageName is the default image used by the operator to create pods
DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2"
// DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL
- DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1"
+ DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0"
)
// BuildInfo is a struct containing all the info about the build
@@ -36,7 +36,7 @@ type BuildInfo struct {
var (
// buildVersion injected during the build
- buildVersion = "1.25.0-rc1"
+ buildVersion = "1.25.0"
// buildCommit injected during the build
buildCommit = "none"
diff --git a/releases/cnpg-1.25.0.yaml b/releases/cnpg-1.25.0.yaml
new file mode 100644
index 0000000000..cbdfc4162b
--- /dev/null
+++ b/releases/cnpg-1.25.0.yaml
@@ -0,0 +1,17771 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-system
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: backups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Backup
+ listKind: BackupList
+ plural: backups
+ singular: backup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.method
+ name: Method
+ type: string
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.error
+ name: Error
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Backup is the Schema for the backups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the backup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ method:
+ default: barmanObjectStore
+ description: |-
+            The backup method to be used; possible options are `barmanObjectStore`,
+            `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the backup. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+            description: Use the Azure AD based authentication without explicitly
+              providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ backupId:
+ description: The ID of the Barman backup
+ type: string
+ backupLabelFile:
+ description: Backup label file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ backupName:
+ description: The Name of the Barman backup
+ type: string
+ beginLSN:
+ description: The starting xlog
+ type: string
+ beginWal:
+ description: The starting WAL
+ type: string
+ commandError:
+ description: The backup command output in case of error
+ type: string
+ commandOutput:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ destinationPath:
+ description: |-
+          The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data. This may not be populated in case of errors.
+ type: string
+ encryption:
+        description: Encryption method required by the S3 API
+ type: string
+ endLSN:
+ description: The ending xlog
+ type: string
+ endWal:
+ description: The ending WAL
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ error:
+ description: The detected error
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google Cloud
+ Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage JSON
+ file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+              If set to true, it will presume that it's running inside a GKE
+              environment; defaults to false.
+ type: boolean
+ type: object
+ instanceID:
+        description: Information to identify the instance the backup has been
+          taken from
+ properties:
+ ContainerID:
+ description: The container ID
+ type: string
+ podName:
+ description: The pod name
+ type: string
+ type: object
+ method:
+ description: The backup method being used
+ type: string
+ online:
+ description: Whether the backup was online/hot (`true`) or offline/cold
+ (`false`)
+ type: boolean
+ phase:
+ description: The last backup status
+ type: string
+ pluginMetadata:
+ additionalProperties:
+ type: string
+ description: A map containing the plugin metadata
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+            description: Use the role based authentication without explicitly
+              providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the region
+ name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+            description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+          The server name on S3; the cluster name is used if this
+          parameter is omitted
+ type: string
+ snapshotBackupStatus:
+ description: Status of the volumeSnapshot backup
+ properties:
+ elements:
+ description: The elements list, populated with the gathered volume
+ snapshots
+ items:
+ description: BackupSnapshotElementStatus is a volume snapshot
+ that is part of a volume snapshot method backup
+ properties:
+ name:
+ description: Name is the snapshot resource name
+ type: string
+ tablespaceName:
+ description: |-
+ TablespaceName is the name of the snapshotted tablespace. Only set
+ when type is PG_TABLESPACE
+ type: string
+ type:
+                  description: Type is the role of the snapshot in the cluster,
+ such as PG_DATA, PG_WAL and PG_TABLESPACE
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ type: array
+ type: object
+ startedAt:
+ description: When the backup was started
+ format: date-time
+ type: string
+ stoppedAt:
+ description: When the backup was terminated
+ format: date-time
+ type: string
+ tablespaceMapFile:
+ description: Tablespace map file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
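+# Illustrative sketch, kept as a comment so it is not applied with this
+# manifest: an on-demand Backup exercising the spec fields defined above.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Backup
+#   metadata:
+#     name: backup-example
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     method: volumeSnapshot
+#     online: true
+#     onlineConfiguration:
+#       immediateCheckpoint: true
+#       waitForArchive: true
+#     target: prefer-standby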
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: clusterimagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ClusterImageCatalog
+ listKind: ClusterImageCatalogList
+ plural: clusterimagecatalogs
+ singular: clusterimagecatalog
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ClusterImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
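+# Illustrative sketch, kept as a comment so it is not applied with this
+# manifest: a ClusterImageCatalog whose entries satisfy the unique-major
+# validation rule above. The image tags are examples only.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: ClusterImageCatalog
+#   metadata:
+#     name: postgresql
+#   spec:
+#     images:
+#       - major: 16
+#         image: ghcr.io/cloudnative-pg/postgresql:16.6
+#       - major: 17
+#         image: ghcr.io/cloudnative-pg/postgresql:17.2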
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: clusters.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Cluster
+ listKind: ClusterList
+ plural: clusters
+ singular: cluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Number of instances
+ jsonPath: .status.instances
+ name: Instances
+ type: integer
+ - description: Number of ready instances
+ jsonPath: .status.readyInstances
+ name: Ready
+ type: integer
+ - description: Cluster current status
+ jsonPath: .status.phase
+ name: Status
+ type: string
+ - description: Primary pod
+ jsonPath: .status.currentPrimary
+ name: Primary
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Cluster is the Schema for the PostgreSQL API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the cluster.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ affinity:
+ description: Affinity/Anti-affinity rules for Pods
+ properties:
+ additionalPodAffinity:
+            description: AdditionalPodAffinity allows specifying pod affinity
+ terms to be passed to all the cluster's pods.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                      the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ additionalPodAntiAffinity:
+ description: |-
+              AdditionalPodAntiAffinity allows specifying pod anti-affinity terms to be added to the ones generated
+ by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to look up values from the
+ incoming pod labels; those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling the MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to look up values from the
+ incoming pod labels; those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling the MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ enablePodAntiAffinity:
+ description: |-
+ Activates anti-affinity for the pods. The operator will define pod
+ anti-affinity rules unless this field is explicitly set to false
+ type: boolean
+ nodeAffinity:
+ description: |-
+ NodeAffinity describes node affinity scheduling rules for the pod.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. Their
+ requirements are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See the Kubernetes
+ documentation for more information.
+ type: string
+ type: object
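+ # For illustration, a Cluster spec exercising these scheduling fields
+ # might look like the following sketch (all names are placeholders):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Cluster
+ #   metadata:
+ #     name: cluster-example
+ #   spec:
+ #     instances: 3
+ #     affinity:
+ #       enablePodAntiAffinity: true
+ #       podAntiAffinityType: required
+ #       topologyKey: kubernetes.io/hostname
+ #       nodeSelector:
+ #         workload: postgres
+ #       tolerations:
+ #       - key: postgres
+ #         operator: Exists
+ #         effect: NoSchedule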
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to back up the data files.
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e., s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with the certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, the operator will presume that it's running inside a GKE
+ environment; defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key-value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role-based authentication without
+ explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key-value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
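+ # For illustration, a minimal barmanObjectStore configuration targeting
+ # S3 might be sketched as follows (bucket, endpoint, and secret
+ # names/keys are placeholders):
+ #
+ #   backup:
+ #     barmanObjectStore:
+ #       destinationPath: s3://my-bucket/backups
+ #       endpointURL: https://s3.example.com
+ #       s3Credentials:
+ #         accessKeyId:
+ #           name: aws-creds
+ #           key: ACCESS_KEY_ID
+ #         secretAccessKey:
+ #           name: aws-creds
+ #           key: ACCESS_SECRET_KEY
+ #       wal:
+ #         compression: gzip
+ #         maxParallel: 4
+ #       data:
+ #         compression: gzip
+ #         jobs: 2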
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are the empty string, which defaults to the `prefer-standby` policy;
+ `primary`, to always run backups on primary instances; and `prefer-standby`,
+ to run backups preferably on the most up-to-date standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations are key-value pairs that will be added
+ to the .metadata.annotations of the snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for the PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to the .metadata.labels of the snapshot resources.
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ Defaults to the PGDATA Snapshot Class, if set.
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
+ type: object
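+ # Putting the sibling fields together, a backup stanza with retention,
+ # target policy, and volume snapshots could be sketched as follows
+ # (the snapshot class name is a placeholder):
+ #
+ #   backup:
+ #     retentionPolicy: "30d"
+ #     target: prefer-standby
+ #     volumeSnapshot:
+ #       className: csi-snapclass
+ #       online: true
+ #       onlineConfiguration:
+ #         immediateCheckpoint: false
+ #         waitForArchive: true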
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ builtinLocale:
+ description: |-
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default: `UTF8`)
+ type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
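+ # For illustration, a microservice-type import could be sketched as
+ # follows (`source-db` is a placeholder externalCluster name):
+ #
+ #   bootstrap:
+ #     initdb:
+ #       import:
+ #         type: microservice
+ #         databases:
+ #         - app
+ #         source:
+ #           externalCluster: source-db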
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default: `C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default: `C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicitly provided parameters instead.
+ If defined, the explicitly provided parameters will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the application database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified
+ in their respective arrays.
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ postInitSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the `postgres`
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the `postgres` database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified
+ in their respective arrays.
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ postInitTemplateSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the `template1`
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitTemplateSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the `template1` database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified
+ in their respective arrays.
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
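+ # The three *SQLRefs fields share the same shape; a sketch for the
+ # application database (secret/configmap names and keys are placeholders):
+ #
+ #   postInitApplicationSQLRefs:
+ #     secretRefs:
+ #     - name: app-init-sql
+ #       key: create_schema.sql
+ #     configMapRefs:
+ #     - name: app-init-sql-cm
+ #       key: seed.sql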
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty, a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ walSegmentSize:
+ description: |-
+ The value in megabytes (1 to 1024) to be passed to the `--wal-segsize`
+ option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
+ maximum: 1024
+ minimum: 1
+ type: integer
+ type: object
+ x-kubernetes-validations:
+ - message: builtinLocale is only available when localeProvider
+ is set to `builtin`
+ rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin'''
+ - message: icuLocale is only available when localeProvider is
+ set to `icu`
+ rule: '!has(self.icuLocale) || self.localeProvider == ''icu'''
+ - message: icuRules is only available when localeProvider is set
+ to `icu`
+ rule: '!has(self.icuRules) || self.localeProvider == ''icu'''
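+ # Tying the locale fields to the validations above, an initdb bootstrap
+ # using the ICU provider could be sketched as (PostgreSQL >= 16 assumed):
+ #
+ #   bootstrap:
+ #     initdb:
+ #       database: app
+ #       owner: app
+ #       dataChecksums: true
+ #       localeProvider: icu
+ #       icuLocale: en-US
+ #       walSegmentSize: 32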
+ pg_basebackup:
+ description: |-
+ Bootstrap the cluster by taking a physical backup of another compatible
+ PostgreSQL instance
+ properties:
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty, a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: The name of the server from which to take
+ a physical backup
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
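+ # For illustration, a pg_basebackup bootstrap might be sketched as
+ # follows (`origin` is a placeholder externalCluster name):
+ #
+ #   bootstrap:
+ #     pg_basebackup:
+ #       source: origin
+ #       database: app
+ #       owner: app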
+ recovery:
+ description: Bootstrap the cluster from a backup
+ properties:
+ backup:
+ description: |-
+ The backup object containing the physical base backup from which to
+ initiate the recovery procedure.
+ Mutually exclusive with `source` and `volumeSnapshots`.
+ properties:
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with the certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ recoveryTarget:
+ description: |-
+ By default, the recovery process applies all the available
+ WAL files in the archive (full recovery). However, you can also
+ end the recovery as soon as a consistent state is reached or
+ recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object,
+ as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+ More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+ properties:
+ backupID:
+ description: |-
+ The ID of the backup from which to start the recovery process.
+ If empty (default), the operator will automatically detect the backup
+ based on targetTime or targetLSN, if specified; otherwise, it will use the
+ latest available backup in chronological order.
+ type: string
+ exclusive:
+ description: |-
+ Set the target to be exclusive. If omitted, defaults to false, so that
+ in Postgres, `recovery_target_inclusive` will be true
+ type: boolean
+ targetImmediate:
+ description: End recovery as soon as a consistent state
+ is reached
+ type: boolean
+ targetLSN:
+ description: The target LSN (Log Sequence Number)
+ type: string
+ targetName:
+ description: |-
+ The target name (to be previously created
+ with `pg_create_restore_point`)
+ type: string
+ targetTLI:
+ description: The target timeline ("latest" or a positive
+ integer)
+ type: string
+ targetTime:
+ description: The target time as a timestamp in the RFC3339
+ standard
+ type: string
+ targetXID:
+ description: The target transaction ID
+ type: string
+ type: object
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty, a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: |-
+ The external cluster whose backup we will restore. This is also
+ used as the name of the folder under which the backup is stored,
+ so it must be set to the name of the source cluster.
+ Mutually exclusive with `backup`.
+ type: string
+ volumeSnapshots:
+ description: |-
+ The static PVC data source(s) from which to initiate the
+ recovery procedure. Currently supporting `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources that map an existing
+ PVC group, compatible with CloudNativePG, and taken with
+ a cold backup copy on a fenced Postgres instance (a limitation
+ that will be removed in the future when online backup
+ support is implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ description: Configuration of the storage for PostgreSQL
+ tablespaces
+ type: object
+ walStorage:
+ description: Configuration of the storage for PostgreSQL
+ WAL (Write-Ahead Log)
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - storage
+ type: object
+ type: object
+ type: object
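+ # A point-in-time recovery from an object store could be sketched as
+ # follows (`origin` and the timestamp are placeholders):
+ #
+ #   bootstrap:
+ #     recovery:
+ #       source: origin
+ #       recoveryTarget:
+ #         targetTime: "2024-01-01T10:00:00Z"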
+ certificates:
+ description: The configuration for the CA and related certificates
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
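+ # User-provided server certificates might be wired in as follows
+ # (secret names and DNS names are placeholders):
+ #
+ #   certificates:
+ #     serverCASecret: my-server-ca
+ #     serverTLSSecret: my-server-tls
+ #     serverAltDNSNames:
+ #     - cluster-example.example.com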
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it if it was automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: |-
+ EnvFrom follows the EnvFrom format to pass environment variable
+ sources to the pods, to be used by Env
+ items:
+ description: EnvFromSource represents the source of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in
+ the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
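+ # env and envFrom follow the core/v1 container conventions; a sketch
+ # (the ConfigMap name is a placeholder):
+ #
+ #   env:
+ #   - name: TZ
+ #     value: Europe/Rome
+ #   envFrom:
+ #   - configMapRef:
+ #       name: postgres-extra-env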
+ ephemeralVolumeSource:
+ description: EphemeralVolumeSource allows the user to configure the
+ source of ephemeral volumes.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+                              in any namespace.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+                              If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to
+ consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+                              in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName;
+                              it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                              set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
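+              # Illustrative only, not part of the generated schema: a minimal
+              # sketch of an `ephemeralVolumeSource` in a Cluster manifest. The
+              # storage class name and size are assumptions.
+              #
+              #   ephemeralVolumeSource:
+              #     volumeClaimTemplate:
+              #       spec:
+              #         accessModes: ["ReadWriteOnce"]
+              #         storageClassName: standard
+              #         resources:
+              #           requests:
+              #             storage: 1Gi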
+ ephemeralVolumesSizeLimit:
+ description: |-
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ volumes
+ properties:
+ shm:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Shm is the size limit of the shared memory volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ temporaryData:
+ anyOf:
+ - type: integer
+ - type: string
+ description: TemporaryData is the size limit of the temporary
+ data volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
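+              # Illustrative only: a minimal sketch bounding the ephemeral
+              # volumes; the quantities are arbitrary assumptions.
+              #
+              #   ephemeralVolumesSizeLimit:
+              #     shm: 256Mi
+              #     temporaryData: 1Gi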
+ externalClusters:
+ description: The list of external clusters which are used in the configuration
+ items:
+ description: |-
+ ExternalCluster represents the connection parameters to an
+ external cluster which is used in the other sections of the configuration
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+                          explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files
+                      When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+                          Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+                        The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+                          If set to true, it is presumed to be running inside a GKE environment;
+                          defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+                          explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+                          description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+                        The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+                            Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
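+                # Illustrative only: a minimal barmanObjectStore sketch pointing
+                # at S3. The bucket, secret name, and secret keys are assumptions.
+                #
+                #   barmanObjectStore:
+                #     destinationPath: s3://my-bucket/backups
+                #     s3Credentials:
+                #       accessKeyId:
+                #         name: aws-creds
+                #         key: ACCESS_KEY_ID
+                #       secretAccessKey:
+                #         name: aws-creds
+                #         key: ACCESS_SECRET_KEY
+                #     wal:
+                #       compression: gzip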
+ connectionParameters:
+ additionalProperties:
+ type: string
+ description: The list of connection parameters, such as dbname,
+ host, username, etc
+ type: object
+ name:
+ description: The server name, required
+ type: string
+ password:
+ description: |-
+ The reference to the password to be used to connect to the server.
+ If a password is provided, CloudNativePG creates a PostgreSQL
+ passfile at `/controller/external/NAME/pass` (where "NAME" is the
+ cluster's name). This passfile is automatically referenced in the
+ connection string when establishing a connection to the remote
+ PostgreSQL server from the current PostgreSQL `Cluster`. This ensures
+ secure and efficient password management for external clusters.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ plugin:
+ description: |-
+ The configuration of the plugin that is taking care
+ of WAL archiving and backups for this external cluster
+ properties:
+ enabled:
+ default: true
+ description: Enabled is true if this plugin will be used
+ type: boolean
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ sslCert:
+ description: |-
+ The reference to an SSL certificate to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslKey:
+ description: |-
+ The reference to an SSL private key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
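+              # Illustrative only: a minimal external cluster entry, e.g. as a
+              # source for recovery or replica clusters. Host, dbname, and the
+              # secret name are assumptions.
+              #
+              #   externalClusters:
+              #     - name: origin
+              #       connectionParameters:
+              #         host: origin-rw.default.svc
+              #         user: postgres
+              #         dbname: app
+              #       password:
+              #         name: origin-superuser
+              #         key: password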
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageCatalogRef:
+ description: Defines the major PostgreSQL version we want to use within
+ an ImageCatalog
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ major:
+ description: The major version of PostgreSQL we want to use from
+ the ImageCatalog
+ type: integer
+ x-kubernetes-validations:
+ - message: Major is immutable
+ rule: self == oldSelf
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - major
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: Only image catalogs are supported
+ rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+ - message: Only image catalogs are supported
+ rule: self.apiGroup == 'postgresql.cnpg.io'
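+          # Illustrative only: selecting a PostgreSQL major version from an
+          # ImageCatalog. The catalog name and major version are assumptions.
+          #
+          #   imageCatalogRef:
+          #     apiGroup: postgresql.cnpg.io
+          #     kind: ImageCatalog
+          #     name: postgresql
+          #     major: 16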
+ imageName:
+ description: |-
+              Name of the container image, supporting both tags (`<image>:<tag>`)
+              and digests for deterministic and repeatable deployments
+              (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+              ceiling(livenessProbeTimeout / 10).
+ format: int32
+ type: integer
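+          # Worked example of the formula above: livenessProbeTimeout: 45 yields
+          # a failure threshold of ceiling(45 / 10) = 5, while the default of 30
+          # yields ceiling(30 / 10) = 3.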
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+                        Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+                        superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
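+            # Illustrative only: a managed role with login and a password taken
+            # from a Secret. The role and secret names are assumptions.
+            #
+            #   roles:
+            #     - name: app_reader
+            #       ensure: present
+            #       login: true
+            #       inRoles: ["pg_read_all_data"]
+            #       passwordSecret:
+            #         name: app-reader-password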
+ services:
+              description: Services managed by the `Cluster`
+ properties:
+ additional:
+ description: Additional is a list of additional managed services
+ specified by the user.
+ items:
+ description: |-
+ ManagedService represents a specific service managed by the cluster.
+ It includes the type of service and its associated template specification.
+ properties:
+ selectorType:
+ description: |-
+ SelectorType specifies the type of selectors that the service will have.
+ Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ serviceTemplate:
+ description: ServiceTemplate is the template specification
+ for the service.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only
+ supported for certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+                              optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+                              case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, the requested family is available in the cluster,
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+                              If specified and supported by the platform, traffic through the cloud-provider
+                              load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                              cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed
+ by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains
+ the configurations of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations
+ of Client IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+                                      The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
+ Default value is 10800(for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic is
+ distributed to Service endpoints. Implementations can use this field as a
+ hint, but are not required to guarantee strict adherence. If the field is
+ not set, the implementation will apply its default routing strategy. If set
+ to "PreferClose", implementations should prioritize endpoints that are
+ topologically close (e.g., same zone).
+ This is a beta field and requires enabling ServiceTrafficDistribution feature.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
+ updateStrategy:
+ default: patch
+ description: UpdateStrategy describes how the service
+ differences should be reconciled
+ enum:
+ - patch
+ - replace
+ type: string
+ required:
+ - selectorType
+ - serviceTemplate
+ type: object
+ type: array
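+                # Illustrative only: an additional managed service exposing the
+                # read-write endpoint through a LoadBalancer; the service name is
+                # an assumption.
+                #
+                #   additional:
+                #     - selectorType: rw
+                #       updateStrategy: patch
+                #       serviceTemplate:
+                #         metadata:
+                #           name: cluster-example-rw-lb
+                #         spec:
+                #           type: LoadBalancer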
+ disabledDefaultServices:
+ description: |-
+ DisabledDefaultServices is a list of service types that are disabled by default.
+                      Valid values are "r" and "ro", representing read and read-only services.
+ items:
+ description: |-
+ ServiceSelectorType describes a valid value for generating the service selectors.
+ It indicates which type of service the selector applies to, such as read-write, read, or read-only
+ enum:
+ - rw
+ - r
+ - ro
+ type: string
+ type: array
+ type: object
+ type: object
+ maxSyncReplicas:
+ default: 0
+ description: |-
+              The target value for the synchronous replication quorum, which can be
+              decreased if the number of ready standbys is lower than this.
+              Undefined or 0 disables synchronous replication.
+ minimum: 0
+ type: integer
+ minSyncReplicas:
+ default: 0
+ description: |-
+ Minimum number of instances required in synchronous replication with the
+              primary. Undefined or 0 allows writes to complete when no standby is
+ available.
+ minimum: 0
+ type: integer
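+          # Worked example, values illustrative: with instances: 3, setting
+          # maxSyncReplicas: 1 and minSyncReplicas: 1 requests one synchronous
+          # standby, and the quorum may only be decreased down to the minimum
+          # when fewer standbys are ready.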
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this cluster
+ properties:
+ customQueriesConfigMap:
+ description: The list of config maps containing the custom queries
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ customQueriesSecret:
+ description: The list of secrets containing the custom queries
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ disableDefaultQueries:
+ default: false
+ description: |-
+                    Whether the default queries should be disabled.
+ Set it to `true` if you don't want to inject default queries into the cluster.
+ Default: false.
+ type: boolean
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ tls:
+ description: |-
+ Configure TLS communication for the metrics endpoint.
+ Changing tls.enabled option will force a rollout of all instances.
+ properties:
+ enabled:
+ default: false
+ description: |-
+ Enable TLS for the monitoring endpoint.
+ Changing this option will force a rollout of all instances.
+ type: boolean
+ type: object
+ type: object
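+          # Illustrative only: enabling the PodMonitor together with a
+          # custom-queries ConfigMap. The ConfigMap name and key are assumptions.
+          #
+          #   monitoring:
+          #     enablePodMonitor: true
+          #     customQueriesConfigMap:
+          #       - name: example-monitoring
+          #         key: custom-queries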
+ nodeMaintenanceWindow:
+ description: Define a maintenance window for the Kubernetes nodes
+ properties:
+ inProgress:
+ default: false
+ description: Is there a node maintenance activity in progress?
+ type: boolean
+ reusePVC:
+ default: true
+ description: |-
+ Reuse the existing PVC (wait for the node to come
+                  up again) or not (recreate it elsewhere when `instances` > 1)
+ type: boolean
+ type: object
+ plugins:
+ description: |-
+ The plugins configuration, containing
+ any plugin to be loaded with the corresponding configuration
+ items:
+ description: |-
+ PluginConfiguration specifies a plugin that need to be loaded for this
+ cluster to be reconciled
+ properties:
+ enabled:
+ default: true
+ description: Enabled is true if this plugin will be used
+ type: boolean
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ type: array
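+          # Illustrative only: loading a plugin by name with free-form
+          # parameters. The plugin name and parameter key are hypothetical.
+          #
+          #   plugins:
+          #     - name: example.cnpg.io/backup-plugin
+          #       enabled: true
+          #       parameters:
+          #         objectStoreName: my-store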
+ postgresGID:
+ default: 26
+ description: The GID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresUID:
+ default: 26
+ description: The UID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresql:
+ description: Configuration of the PostgreSQL server
+ properties:
+ enableAlterSystem:
+ description: |-
+ If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+ on this CloudNativePG Cluster.
+ This should only be used for debugging and troubleshooting.
+ Defaults to false.
+ type: boolean
+ ldap:
+ description: Options to specify LDAP configuration
+ properties:
+ bindAsAuth:
+ description: Bind as authentication configuration
+ properties:
+ prefix:
+ description: Prefix for the bind authentication option
+ type: string
+ suffix:
+ description: Suffix for the bind authentication option
+ type: string
+ type: object
+ bindSearchAuth:
+ description: Bind+Search authentication configuration
+ properties:
+ baseDN:
+ description: Root DN to begin the user search
+ type: string
+ bindDN:
+ description: DN of the user to bind to the directory
+ type: string
+ bindPassword:
+ description: Secret with the password for the user to
+ bind to the directory
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ searchAttribute:
+ description: Attribute to match against the username
+ type: string
+ searchFilter:
+ description: Search filter to use when doing the search+bind
+ authentication
+ type: string
+ type: object
+ port:
+ description: LDAP server port
+ type: integer
+ scheme:
+                description: LDAP scheme to be used; possible options are
+                  `ldap` and `ldaps`
+ enum:
+ - ldap
+ - ldaps
+ type: string
+ server:
+ description: LDAP hostname or IP address
+ type: string
+ tls:
+                description: Set to 'true' to enable LDAP over TLS. Defaults
+                  to 'false'
+ type: boolean
+ type: object
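+          # A sketch of a bind+search LDAP configuration (illustrative only;
+          # the server name, DNs, and secret name are placeholders):
+          #   ldap:
+          #     server: ldap.example.com
+          #     scheme: ldaps
+          #     bindSearchAuth:
+          #       baseDN: ou=people,dc=example,dc=com
+          #       bindDN: cn=admin,dc=example,dc=com
+          #       bindPassword:
+          #         name: ldap-secret
+          #         key: password
+          #       searchAttribute: uid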
+ parameters:
+ additionalProperties:
+ type: string
+ description: PostgreSQL configuration options (postgresql.conf)
+ type: object
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ pg_ident:
+ description: |-
+ PostgreSQL User Name Maps rules (lines to be appended
+ to the pg_ident.conf file)
+ items:
+ type: string
+ type: array
+ promotionTimeout:
+ description: |-
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+ Default value is 40000000, greater than one year in seconds,
+ big enough to simulate an infinite timeout
+ format: int32
+ type: integer
+ shared_preload_libraries:
+ description: Lists of shared preload libraries to add to the default
+ ones
+ items:
+ type: string
+ type: array
+ syncReplicaElectionConstraint:
+ description: |-
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+ set up.
+ properties:
+ enabled:
+ description: This flag enables the constraints for sync replicas
+ type: boolean
+ nodeLabelsAntiAffinity:
+                description: A list of node label values to extract and compare
+                  to evaluate whether the pods reside in the same topology or not
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ synchronous:
+ description: Configuration of the PostgreSQL synchronous replication
+ feature
+ properties:
+ dataDurability:
+ default: required
+ description: |-
+ If set to "required", data durability is strictly enforced. Write operations
+ with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+ block if there are insufficient healthy replicas, ensuring data persistence.
+ If set to "preferred", data durability is maintained when healthy replicas
+ are available, but the required number of instances will adjust dynamically
+ if replicas become unavailable. This setting relaxes strict durability enforcement
+ to allow for operational continuity. This setting is only applicable if both
+ `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+ enum:
+ - required
+ - preferred
+ type: string
+ maxStandbyNamesFromCluster:
+ description: |-
+ Specifies the maximum number of local cluster pods that can be
+ automatically included in the `synchronous_standby_names` option in
+ PostgreSQL.
+ type: integer
+ method:
+ description: |-
+ Method to select synchronous replication standbys from the listed
+ servers, accepting 'any' (quorum-based synchronous replication) or
+ 'first' (priority-based synchronous replication) as values.
+ enum:
+ - any
+ - first
+ type: string
+ number:
+ description: |-
+ Specifies the number of synchronous standby servers that
+ transactions must wait for responses from.
+ type: integer
+ x-kubernetes-validations:
+ - message: The number of synchronous replicas should be greater
+ than zero
+ rule: self > 0
+ standbyNamesPost:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` after local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ standbyNamesPre:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` before local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ required:
+ - method
+ - number
+ type: object
+ x-kubernetes-validations:
+ - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+ and empty 'standbyNamesPost'
+ rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+ || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+ || self.standbyNamesPost.size()==0))
+ type: object
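+      # Illustrative sketch of quorum-based synchronous replication (values
+      # are placeholders): any one standby must confirm each commit.
+      #   postgresql:
+      #     synchronous:
+      #       method: any
+      #       number: 1
+      #       dataDurability: required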
+ primaryUpdateMethod:
+ default: restart
+ description: |-
+ Method to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be with a switchover (`switchover`) or in-place (`restart` - default)
+ enum:
+ - switchover
+ - restart
+ type: string
+ primaryUpdateStrategy:
+ default: unsupervised
+ description: |-
+ Deployment strategy to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be automated (`unsupervised` - default) or manual (`supervised`)
+ enum:
+ - unsupervised
+ - supervised
+ type: string
+ priorityClassName:
+ description: |-
+ Name of the priority class which will be used in every generated Pod, if the PriorityClass
+ specified does not exist, the pod will not be able to schedule. Please refer to
+ https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ for more information
+ type: string
+ probes:
+ description: |-
+ The configuration of the probes to be injected
+ in the PostgreSQL Pods.
+ properties:
+ liveness:
+ description: The liveness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                  Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                  Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ readiness:
+ description: The readiness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+                  Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                  Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                  Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ startup:
+ description: The startup probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+                  Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                  Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                  Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ type: object
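+      # A minimal probe override sketch (illustrative only): relax the
+      # liveness probe while leaving readiness and startup at their defaults.
+      #   probes:
+      #     liveness:
+      #       periodSeconds: 15
+      #       failureThreshold: 5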
+ projectedVolumeTemplate:
+ description: |-
+          Template to be used to define projected volumes; projected volumes will be mounted
+          under the `/projected` base folder
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume root to write
+ the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap data
+ to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the downwardAPI
+ data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the
+ pod: only annotations, labels, name, namespace
+ and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must not
+ be absolute or contain the ''..'' path. Must
+ be utf-8 encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for
+ volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the secret data to
+ project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether the Secret
+ or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information about the
+ serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+                        its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
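+      # Illustrative projected volume sketch (ConfigMap name and key are
+      # placeholders): the file ends up under the `/projected` base folder.
+      #   projectedVolumeTemplate:
+      #     sources:
+      #       - configMap:
+      #           name: app-settings
+      #           items:
+      #             - key: app.conf
+      #               path: app.conf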
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+              existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ minApplyDelay:
+ description: |-
+ When replica mode is enabled, this parameter allows you to replay
+ transactions only when the system time is at least the configured
+ time past the commit time. This provides an opportunity to correct
+ data loss errors. Note that when this parameter is set, a promotion
+ token cannot be used.
+ type: string
+ primary:
+ description: |-
+ Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+ topology specified in externalClusters
+ type: string
+ promotionToken:
+ description: |-
+ A demotion token generated by an external cluster used to
+ check if the promotion requirements are met.
+ type: string
+ self:
+ description: |-
+ Self defines the name of this cluster. It is used to determine if this is a primary
+ or a replica cluster, comparing it with `primary`
+ type: string
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
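+      # A sketch of a replica cluster pointing at an external origin (the
+      # source name is a placeholder and must match an entry in externalClusters):
+      #   replica:
+      #     enabled: true
+      #     source: cluster-origin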
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+                  This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+                  It may only contain lowercase letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$
+ type: string
+ type: object
+ synchronizeReplicas:
+ description: Configures the synchronization of the user defined
+ physical replication slots
+ properties:
+ enabled:
+ default: true
+ description: When set to true, every replication slot that
+ is on the primary is synchronized on each standby
+ type: boolean
+ excludePatterns:
+ description: List of regular expression patterns to match
+ the names of replication slots to be excluded (by default
+ empty)
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ updateInterval:
+ default: 30
+ description: |-
+              Each standby will update the status of its local replication slots
+ every `updateInterval` seconds (default 30).
+ minimum: 1
+ type: integer
+ type: object
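+      # Illustrative replication slot management sketch (the exclude pattern
+      # is a placeholder): keep HA slots enabled but skip user-defined slots
+      # matching the pattern.
+      #   replicationSlots:
+      #     highAvailability:
+      #       enabled: true
+      #     synchronizeReplicas:
+      #       enabled: true
+      #       excludePatterns:
+      #         - "^temp_"
+      #     updateInterval: 30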
+ resources:
+ description: |-
+ Resources requirements of every generated Pod. Please refer to
+ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ for more information.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified Kubernetes
+ scheduler. If not specified, the pod will be dispatched by the default
+ scheduler. More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/
+ type: string
+ seccompProfile:
+ description: |-
+ The SeccompProfile applied to every Pod and Container.
+ Defaults to: `RuntimeDefault`
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountTemplate:
+ description: Configure the generation of the service account
+ properties:
+ metadata:
+ description: |-
+              Metadata to be used for the generated
+              service account
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ required:
+ - metadata
+ type: object
+ smartShutdownTimeout:
+ default: 180
+ description: |-
+          The time in seconds reserved for the smart shutdown of Postgres to complete.
+ Make sure you reserve enough time for the operator to request a fast shutdown of Postgres
+ (that is: `stopDelay` - `smartShutdownTimeout`).
+ format: int32
+ type: integer
+ startDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ successfully start up (default 3600).
+ The startup probe failure threshold is derived from this value using the formula:
+ ceiling(startDelay / 10).
+ format: int32
+ type: integer
+ stopDelay:
+ default: 1800
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+          gracefully shut down (default 1800)
+ format: int32
+ type: integer
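+      # Illustrative timing sketch: with the values below, the operator waits
+      # up to 180 seconds for a smart shutdown, leaving the remaining
+      # `stopDelay` - `smartShutdownTimeout` window (1620s) for a fast shutdown.
+      #   startDelay: 3600
+      #   stopDelay: 1800
+      #   smartShutdownTimeout: 180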
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                  set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+            description: Resize existing PVCs; defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
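+      # A minimal storage sketch (the storage class name is a placeholder):
+      #   storage:
+      #     size: 10Gi
+      #     storageClass: standard
+      #     resizeInUseVolumes: true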
+ superuserSecret:
+ description: |-
+          The secret containing the superuser password. If not defined, a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+          to gracefully shut down during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes
+ to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                        set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+                  description: Resize existing PVCs; defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
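+      # Hypothetical tablespace declaration (tablespace and owner names are
+      # placeholders):
+      #   tablespaces:
+      #     - name: analytics
+      #       owner:
+      #         name: app
+      #       storage:
+      #         size: 5Gi
+      #       temporary: false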
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+                        And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+                        The number of domains is less than 5 (MinDomains), so the "global minimum" is treated as 0.
+                        In this situation, a new pod with the same labelSelector cannot be scheduled,
+                        because the computed skew would be 3 (3 - 0) if the new pod were scheduled to
+                        any of the three zones, which would violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+                        This is a beta-level feature, enabled by default via the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+                        This is a beta-level feature, enabled by default via the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+                        We consider each as a "bucket", and try to put a balanced number
+                        of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
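+              # A minimal usage sketch (not part of the schema): spreading a
+              # cluster's instances across zones. The selector label and the
+              # cluster name "cluster-example" are assumed examples.
+              #
+              #   topologySpreadConstraints:
+              #   - maxSkew: 1
+              #     topologyKey: topology.kubernetes.io/zone
+              #     whenUnsatisfiable: DoNotSchedule
+              #     labelSelector:
+              #       matchLabels:
+              #         cnpg.io/cluster: cluster-example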
+ walStorage:
+ description: Configuration of the storage for PostgreSQL WAL (Write-Ahead
+ Log)
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                          set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+                    description: Resize existing PVCs; defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
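+              # A minimal usage sketch (not part of the schema): dedicated WAL
+              # storage with online resize enabled. The storage class name
+              # "fast-ssd" is an assumed example.
+              #
+              #   walStorage:
+              #     size: 10Gi
+              #     storageClass: fast-ssd
+              #     resizeInUseVolumes: true
+              #
+              # Only the size (or a pvcTemplate providing it) is needed; the
+              # remaining fields fall back to the defaults described above.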
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ azurePVCUpdateEnabled:
+ description: AzurePVCUpdateEnabled shows if the PVC online upgrade
+ is enabled for this cluster
+ type: boolean
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+                description: The commit hash of the operator that is running
+ type: string
+ cloudNativePGOperatorHash:
+                description: The hash of the operator binary
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions
+ type: object
+ type: object
+ currentPrimary:
+ description: Current primary instance
+ type: string
+ currentPrimaryFailingSinceTimestamp:
+ description: |-
+ The timestamp when the primary was detected to be unhealthy
+ This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+ type: string
+ currentPrimaryTimestamp:
+ description: The timestamp when the last actual promotion to primary
+ has occurred
+ type: string
+ danglingPVC:
+ description: |-
+ List of all the PVCs created by this cluster and still available
+ which are not attached to a Pod
+ items:
+ type: string
+ type: array
+ demotionToken:
+ description: |-
+ DemotionToken is a JSON token containing the information
+ from pg_controldata such as Database system identifier, Latest checkpoint's
+ TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+ WAL file, and Time of latest checkpoint
+ type: string
+ firstRecoverabilityPoint:
+ description: |-
+ The first recoverability point, stored as a date in RFC3339 format.
+ This field is calculated from the content of FirstRecoverabilityPointByMethod
+ type: string
+ firstRecoverabilityPointByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: The first recoverability point, stored as a date in RFC3339
+ format, per backup method type
+ type: object
+ healthyPVC:
+ description: List of all the PVCs not dangling nor initializing
+ items:
+ type: string
+ type: array
+ image:
+ description: Image contains the image name used by the pods
+ type: string
+ initializingPVC:
+ description: List of all the PVCs that are being initialized by this
+ cluster
+ items:
+ type: string
+ type: array
+ instanceNames:
+ description: List of instance names in the cluster
+ items:
+ type: string
+ type: array
+ instances:
+ description: The total number of PVC Groups detected in the cluster.
+ It may differ from the number of existing instance pods.
+ type: integer
+ instancesReportedState:
+ additionalProperties:
+ description: InstanceReportedState describes the last reported state
+ of an instance during a reconciliation loop
+ properties:
+ isPrimary:
+ description: indicates if an instance is the primary one
+ type: boolean
+ timeLineID:
+ description: indicates on which TimelineId the instance is
+ type: integer
+ required:
+ - isPrimary
+ type: object
+ description: The reported state of the instances during the last reconciliation
+ loop
+ type: object
+ instancesStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: InstancesStatus indicates in which status the instances
+ are
+ type: object
+ jobCount:
+ description: How many Jobs have been created by this cluster
+ format: int32
+ type: integer
+ lastFailedBackup:
+ description: Stored as a date in RFC3339 format
+ type: string
+ lastPromotionToken:
+ description: |-
+ LastPromotionToken is the last verified promotion token that
+ was used to promote a replica cluster
+ type: string
+ lastSuccessfulBackup:
+ description: |-
+ Last successful backup, stored as a date in RFC3339 format
+ This field is calculated from the content of LastSuccessfulBackupByMethod
+ type: string
+ lastSuccessfulBackupByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: Last successful backup, stored as a date in RFC3339 format,
+ per backup method type
+ type: object
+ latestGeneratedNode:
+ description: ID of the latest generated node (used to avoid node name
+ clashing)
+ type: integer
+ managedRolesStatus:
+ description: ManagedRolesStatus reports the state of the managed roles
+ in the cluster
+ properties:
+ byStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: ByStatus gives the list of roles in each state
+ type: object
+ cannotReconcile:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: |-
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+ with an explanation of the cause
+ type: object
+ passwordStatus:
+ additionalProperties:
+ description: PasswordState represents the state of the password
+ of a managed RoleConfiguration
+ properties:
+ resourceVersion:
+ description: the resource version of the password secret
+ type: string
+ transactionID:
+ description: the last transaction ID to affect the role
+ definition in PostgreSQL
+ format: int64
+ type: integer
+ type: object
+ description: PasswordStatus gives the last transaction id and
+ password secret version for each managed role
+ type: object
+ type: object
+ onlineUpdateEnabled:
+ description: OnlineUpdateEnabled shows if the online upgrade is enabled
+ inside the cluster
+ type: boolean
+ phase:
+ description: Current phase of the cluster
+ type: string
+ phaseReason:
+ description: Reason for the current phase
+ type: string
+ pluginStatus:
+ description: PluginStatus is the status of the loaded plugins
+ items:
+ description: PluginStatus is the status of a loaded plugin
+ properties:
+ backupCapabilities:
+ description: |-
+ BackupCapabilities are the list of capabilities of the
+ plugin regarding the Backup management
+ items:
+ type: string
+ type: array
+ capabilities:
+ description: |-
+ Capabilities are the list of capabilities of the
+ plugin
+ items:
+ type: string
+ type: array
+ name:
+ description: Name is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
+ status:
+                      description: Status contains the status reported by the plugin
+                        through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+                description: Name of the current read service
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+                    description: The resource version of the PostgreSQL client-side
+                      CA secret
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      CA secret
+ type: string
+ serverSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      secret
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+                  Target primary instance; this may differ from the current primary
+                  during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+                      be the same as the number of instances in the Postgres HA cluster, implying
+                      a shared-nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+                    SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+                description: Name of the current write service
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: databases.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Database
+ listKind: DatabaseList
+ plural: databases
+ singular: database
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Database is the Schema for the databases API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired Database.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allowConnections:
+ description: |-
+ Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and
+ `ALTER DATABASE`. If false then no one can connect to this database.
+ type: boolean
+ builtinLocale:
+ description: |-
+ Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. Specifies the locale name when the
+ builtin provider is used. This option requires `localeProvider` to
+ be set to `builtin`. Available from PostgreSQL 17.
+ type: string
+ x-kubernetes-validations:
+ - message: builtinLocale is immutable
+ rule: self == oldSelf
+ cluster:
+ description: The name of the PostgreSQL cluster hosting the database.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ collationVersion:
+ description: |-
+ Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This
+ setting cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: collationVersion is immutable
+ rule: self == oldSelf
+ connectionLimit:
+ description: |-
+ Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and
+ `ALTER DATABASE`. How many concurrent connections can be made to
+ this database. -1 (the default) means no limit.
+ type: integer
+ databaseReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this database.
+ enum:
+ - delete
+ - retain
+ type: string
+ encoding:
+ description: |-
+ Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Character set encoding to use in the database.
+ type: string
+ x-kubernetes-validations:
+ - message: encoding is immutable
+ rule: self == oldSelf
+ ensure:
+ default: present
+ description: Ensure the PostgreSQL database is `present` or `absent`
+ - defaults to "present".
+ enum:
+ - present
+ - absent
+ type: string
+ icuLocale:
+ description: |-
+ Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. Specifies the ICU locale when the ICU
+ provider is used. This option requires `localeProvider` to be set to
+ `icu`. Available from PostgreSQL 15.
+ type: string
+ x-kubernetes-validations:
+ - message: icuLocale is immutable
+ rule: self == oldSelf
+ icuRules:
+ description: |-
+ Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Specifies additional collation rules to customize
+ the behavior of the default collation. This option requires
+ `localeProvider` to be set to `icu`. Available from PostgreSQL 16.
+ type: string
+ x-kubernetes-validations:
+ - message: icuRules is immutable
+ rule: self == oldSelf
+ isTemplate:
+ description: |-
+ Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER
+ DATABASE`. If true, this database is considered a template and can
+ be cloned by any user with `CREATEDB` privileges.
+ type: boolean
+ locale:
+ description: |-
+ Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. Sets the default collation order and character
+ classification in the new database.
+ type: string
+ x-kubernetes-validations:
+ - message: locale is immutable
+ rule: self == oldSelf
+ localeCType:
+ description: |-
+ Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: localeCType is immutable
+ rule: self == oldSelf
+ localeCollate:
+ description: |-
+ Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This
+ setting cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: localeCollate is immutable
+ rule: self == oldSelf
+ localeProvider:
+ description: |-
+ Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This
+ setting cannot be changed. This option sets the locale provider for
+ databases created in the new cluster. Available from PostgreSQL 16.
+ type: string
+ x-kubernetes-validations:
+ - message: localeProvider is immutable
+ rule: self == oldSelf
+ name:
+ description: The name of the database to create inside PostgreSQL.
+ This setting cannot be changed.
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ - message: the name postgres is reserved
+ rule: self != 'postgres'
+ - message: the name template0 is reserved
+ rule: self != 'template0'
+ - message: the name template1 is reserved
+ rule: self != 'template1'
+ owner:
+ description: |-
+ Maps to the `OWNER` parameter of `CREATE DATABASE`.
+ Maps to the `OWNER TO` command of `ALTER DATABASE`.
+ The role name of the user who owns the database inside PostgreSQL.
+ type: string
+ tablespace:
+ description: |-
+ Maps to the `TABLESPACE` parameter of `CREATE DATABASE`.
+ Maps to the `SET TABLESPACE` command of `ALTER DATABASE`.
+ The name of the tablespace (in PostgreSQL) that will be associated
+ with the new database. This tablespace will be the default
+ tablespace used for objects created in this database.
+ type: string
+ template:
+ description: |-
+ Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting
+ cannot be changed. The name of the template from which to create
+ this database.
+ type: string
+ x-kubernetes-validations:
+ - message: template is immutable
+ rule: self == oldSelf
+ required:
+ - cluster
+ - name
+ - owner
+ type: object
+ x-kubernetes-validations:
+ - message: builtinLocale is only available when localeProvider is set
+ to `builtin`
+ rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin'''
+ - message: icuLocale is only available when localeProvider is set to `icu`
+ rule: '!has(self.icuLocale) || self.localeProvider == ''icu'''
+ - message: icuRules is only available when localeProvider is set to `icu`
+ rule: '!has(self.icuRules) || self.localeProvider == ''icu'''
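+            # A minimal usage sketch (not part of the schema): a Database
+            # resource using the `template` field, which maps to the TEMPLATE
+            # parameter of CREATE DATABASE. The names "cluster-example",
+            # "app", and "mytemplate" are assumed examples.
+            #
+            #   apiVersion: postgresql.cnpg.io/v1
+            #   kind: Database
+            #   metadata:
+            #     name: db-one
+            #   spec:
+            #     cluster:
+            #       name: cluster-example
+            #     name: one
+            #     owner: app
+            #     template: mytemplate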
+ status:
+ description: |-
+ Most recently observed status of the Database. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
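+            # A minimal usage sketch (not part of the schema): a catalog with
+            # two entries; the rule above rejects duplicate major versions.
+            # The image references are assumed examples.
+            #
+            #   images:
+            #   - major: 16
+            #     image: ghcr.io/cloudnative-pg/postgresql:16.4
+            #   - major: 17
+            #     image: ghcr.io/cloudnative-pg/postgresql:17.0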
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: poolers.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Pooler
+ listKind: PoolerList
+ plural: poolers
+ singular: pooler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.type
+ name: Type
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Pooler is the Schema for the poolers API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the Pooler.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: |-
+ This is the cluster reference on which the Pooler will work.
+ Pooler name should never match with any cluster name within the same namespace.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ deploymentStrategy:
+ description: The deployment strategy to use for pgbouncer to replace
+ existing pods with new ones
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if DeploymentStrategyType =
+ RollingUpdate.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be scheduled above the desired number of
+ pods.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up.
+ Defaults to 25%.
+ Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ the rolling update starts, such that the total number of old and new pods do not exceed
+ 130% of desired pods. Once old pods have been killed,
+ new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ at any time during the update is at most 130% of desired pods.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ Absolute number is calculated from percentage by rounding down.
+ This can not be 0 if MaxSurge is 0.
+ Defaults to 25%.
+ Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ that the total number of pods available at all times during the update is at
+ least 70% of desired pods.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description: Type of deployment. Can be "Recreate" or "RollingUpdate".
+ Default is RollingUpdate.
+ type: string
+ type: object
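+              # A minimal usage sketch (not part of the schema): a rolling
+              # update that keeps every pgbouncer pod available until its
+              # replacement is ready.
+              #
+              #   deploymentStrategy:
+              #     type: RollingUpdate
+              #     rollingUpdate:
+              #       maxSurge: 1
+              #       maxUnavailable: 0
+              #
+              # As noted above, maxSurge and maxUnavailable cannot both be 0.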
+ instances:
+ default: 1
+ description: 'The number of replicas we want. Default: 1.'
+ format: int32
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this pooler.
+ properties:
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
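+              # A minimal usage sketch (not part of the schema): enable the
+              # PodMonitor and drop a high-cardinality metric family before
+              # ingestion. The metric name pattern is an assumed example.
+              #
+              #   monitoring:
+              #     enablePodMonitor: true
+              #     podMonitorMetricRelabelings:
+              #     - sourceLabels: [__name__]
+              #       regex: pgbouncer_stats_.*
+              #       action: drop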
+ pgbouncer:
+ description: The PgBouncer configuration
+ properties:
+ authQuery:
+ description: |-
+                    The query that will be used to retrieve the hash of the password
+                    of a given user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+ In case it is specified, also an AuthQuerySecret has to be specified and
+ no automatic CNPG Cluster integration will be triggered.
+ type: string
+ authQuerySecret:
+ description: |-
+ The credentials of the user that need to be used for the authentication
+ query. In case it is specified, also an AuthQuery
+ (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+ has to be specified and no automatic CNPG Cluster integration will be triggered.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Additional parameters to be passed to PgBouncer - please check
+ the CNPG documentation for a list of options you can configure
+ type: object
+ paused:
+ default: false
+ description: |-
+ When set to `true`, PgBouncer will disconnect from the PostgreSQL
+ server, first waiting for all queries to complete, and pause all new
+ client connections until this value is set to `false` (default). Internally,
+ the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
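+              # A minimal usage sketch (not part of the schema): transaction
+              # pooling with an explicit auth query. As documented above,
+              # authQuery and authQuerySecret must be provided together; the
+              # secret name "pooler-auth" is an assumed example.
+              #
+              #   pgbouncer:
+              #     poolMode: transaction
+              #     authQuerySecret:
+              #       name: pooler-auth
+              #     authQuery: SELECT usename, passwd FROM public.user_search($1)
+              #     parameters:
+              #       max_client_conn: "1000"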
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+                      optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+                      case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, the requested family is available in the cluster,
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
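+              # Illustrative sketch only (not part of the generated schema): a
+              # dual-stack configuration combining the ipFamilyPolicy and
+              # ipFamilies fields described above. The values are hypothetical.
+              #
+              #   ipFamilyPolicy: PreferDualStack
+              #   ipFamilies: ["IPv4", "IPv6"]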
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+                  If specified and supported by the platform, traffic through the cloud-provider
+                  load-balancer will be restricted to the specified client IPs. This field will be
+                  ignored if the cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
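+                # Hypothetical ports entry matching the ServicePort schema
+                # above; the PostgreSQL port is an illustrative choice.
+                #
+                #   ports:
+                #   - name: postgres
+                #     port: 5432
+                #     protocol: TCP
+                #     targetPort: 5432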
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains the configurations
+ of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations of Client
+ IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+                            The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
+                            Default value is 10800 (for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic is
+ distributed to Service endpoints. Implementations can use this field as a
+ hint, but are not required to guarantee strict adherence. If the field is
+ not set, the implementation will apply its default routing strategy. If set
+ to "PreferClose", implementations should prioritize endpoints that are
+ topologically close (e.g., same zone).
+ This is a beta field and requires enabling ServiceTrafficDistribution feature.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
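+            # A minimal usage sketch for the serviceTemplate defined above,
+            # assuming a Pooler-style resource; the label and field values are
+            # hypothetical, not mandated by this schema.
+            #
+            #   serviceTemplate:
+            #     metadata:
+            #       labels:
+            #         app: pooler-example
+            #     spec:
+            #       type: LoadBalancer
+            #       externalTrafficPolicy: Local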
+ template:
+ description: The template of the Pod to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
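+                # Hypothetical nodeAffinity value for the schema above: require
+                # scheduling onto amd64 or arm64 nodes. The label key is the
+                # well-known kubernetes.io/arch node label.
+                #
+                #   nodeAffinity:
+                #     requiredDuringSchedulingIgnoredDuringExecution:
+                #       nodeSelectorTerms:
+                #       - matchExpressions:
+                #         - key: kubernetes.io/arch
+                #           operator: In
+                #           values: ["amd64", "arm64"]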
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+                        "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                              to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                              to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                      the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                            to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                            to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+                        "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                              to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                              to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                      the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                            to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                            to select the group of existing pods that will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
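+              # Sketch of a complete affinity stanza for this pod template,
+              # using pod anti-affinity to prefer spreading pods across nodes;
+              # the app label is a hypothetical selector.
+              #
+              #   affinity:
+              #     podAntiAffinity:
+              #       preferredDuringSchedulingIgnoredDuringExecution:
+              #       - weight: 100
+              #         podAffinityTerm:
+              #           topologyKey: kubernetes.io/hostname
+              #           labelSelector:
+              #             matchLabels:
+              #               app: pooler-example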
+ automountServiceAccountToken:
+ description: AutomountServiceAccountToken indicates whether
+ a service account token should be automatically mounted.
+ type: boolean
+ containers:
+ description: |-
+ List of containers belonging to the pod.
+ Containers cannot currently be added or removed.
+ There must be at least one container in a Pod.
+ Cannot be updated.
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+                              Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
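+                      # Hypothetical env entry resolving a variable from a
+                      # Secret via the valueFrom schema above; the names are
+                      # illustrative only.
+                      #
+                      #   env:
+                      #   - name: PGPASSWORD
+                      #     valueFrom:
+                      #       secretKeyRef:
+                      #         name: pooler-example-auth
+                      #         key: password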
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+                            description: EnvFromSource represents the source of
+                              a set of ConfigMaps or Secrets
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
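+                          # Illustrative example (not part of the generated schema):
+                          # importing every key of a ConfigMap with a prefix; values
+                          # set explicitly in env take precedence over these. The
+                          # referent name is hypothetical.
+                          #   envFrom:
+                          #     - prefix: CFG_
+                          #       configMapRef:
+                          #         name: app-settings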
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents a duration that
+ the container should sleep.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents a duration that
+ the container should sleep.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
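+                          # Illustrative example (not part of the generated schema):
+                          # lifecycle hooks. The command and endpoint are hypothetical;
+                          # note that exec is not run inside a shell unless one is
+                          # invoked explicitly, as described above.
+                          #   lifecycle:
+                          #     postStart:
+                          #       exec:
+                          #         command: ["/bin/sh", "-c", "echo started > /tmp/started"]
+                          #     preStop:
+                          #       httpGet:
+                          #         path: /shutdown
+                          #         port: 8080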
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                The grace period is the duration in seconds between the time the processes running in the pod
+                                are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
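+                          # Illustrative example (not part of the generated schema):
+                          # an HTTP liveness probe with the documented defaults spelled
+                          # out (period 10s, failureThreshold 3, timeout 1s). Path and
+                          # port are hypothetical.
+                          #   livenessProbe:
+                          #     httpGet:
+                          #       path: /healthz
+                          #       port: 8080
+                          #     initialDelaySeconds: 5
+                          #     periodSeconds: 10
+                          #     failureThreshold: 3
+                          #     timeoutSeconds: 1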
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+                                  Number of the port to expose on the pod's IP address.
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+                                  Number of the port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
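+                          # Illustrative example (not part of the generated schema):
+                          # a named port. The name must be an IANA_SVC_NAME, and
+                          # entries are merged on the containerPort/protocol pair.
+                          #   ports:
+                          #     - name: metrics
+                          #       containerPort: 9187
+                          #       protocol: TCP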
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+                                Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                The grace period is the duration in seconds between the time the processes running in the pod
+                                are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
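+                          # Illustrative example (not part of the generated schema):
+                          # quantities follow the pattern above, e.g. "100m" CPU and
+                          # "256Mi" memory; requests may not exceed limits.
+                          #   resources:
+                          #     requests:
+                          #       cpu: 100m
+                          #       memory: 256Mi
+                          #     limits:
+                          #       cpu: "1"
+                          #       memory: 512Mi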
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
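+                          # Illustrative example (not part of the generated schema):
+                          # a restrictive container securityContext; the UID is
+                          # hypothetical.
+                          #   securityContext:
+                          #     runAsNonRoot: true
+                          #     runAsUser: 10001
+                          #     allowPrivilegeEscalation: false
+                          #     readOnlyRootFilesystem: true
+                          #     capabilities:
+                          #       drop: ["ALL"]
+                          #     seccompProfile:
+                          #       type: RuntimeDefault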
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+                                Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                The grace period is the duration in seconds between the time the processes running in the pod
+                                are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
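+                          # Illustrative example (not part of the generated schema):
+                          # a startup probe that allows up to 30 * 10 = 300 seconds
+                          # for a slow start before the liveness probe takes over.
+                          # Path and port are hypothetical.
+                          #   startupProbe:
+                          #     httpGet:
+                          #       path: /healthz
+                          #       port: 8080
+                          #     failureThreshold: 30
+                          #     periodSeconds: 10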
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+                            flag is false, a container process that reads from stdin will never receive an EOF.
+                            Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
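+                          # Illustrative example (not part of the generated schema):
+                          # a read-only mount of a single key via subPath; the name
+                          # must match a volume declared in the pod spec.
+                          #   volumeMounts:
+                          #     - name: config
+                          #       mountPath: /etc/app
+                          #       readOnly: true
+                          #       subPath: settings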
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ dnsConfig:
+ description: |-
+ Specifies the DNS parameters of a pod.
+ Parameters specified here will be merged to the generated DNS
+ configuration based on DNSPolicy.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver
+ options of a pod.
+ properties:
+ name:
+ description: |-
+ Name is this DNS resolver option's name.
+ Required.
+ type: string
+ value:
+ description: Value is this DNS resolver option's
+ value.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
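+                      # Illustrative example (not part of the generated schema):
+                      # resolver options merged with the DNSPolicy-generated
+                      # configuration; all values are hypothetical.
+                      #   dnsConfig:
+                      #     nameservers: ["10.96.0.10"]
+                      #     searches: ["svc.cluster.local"]
+                      #     options:
+                      #       - name: ndots
+                      #         value: "2"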
+ dnsPolicy:
+ description: |-
+ Set DNS policy for the pod.
+ Defaults to "ClusterFirst".
+ Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ To have DNS options set along with hostNetwork, you have to specify DNS policy
+ explicitly to 'ClusterFirstWithHostNet'.
+ type: string
+ enableServiceLinks:
+ description: |-
+ EnableServiceLinks indicates whether information about services should be injected into pod's
+ environment variables, matching the syntax of Docker links.
+ Optional: Defaults to true.
+ type: boolean
+ ephemeralContainers:
+ description: |-
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ items:
+ description: |-
+ An EphemeralContainer is a temporary container that you may add to an existing Pod for
+ user-initiated activities such as debugging. Ephemeral containers have no resource or
+ scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+ removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+ Pod to exceed its resource allocation.
+
+ To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+ Pod. Ephemeral containers may not be removed or restarted.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+                                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+                            description: EnvFromSource represents the source of
+                              a set of ConfigMaps or Secrets
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral
+ containers.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents a duration that
+ the container should sleep.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents a duration that
+ the container should sleep.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the ephemeral container specified as a DNS_LABEL.
+ This name must be unique among all containers, init containers and ephemeral containers.
+ type: string
+ ports:
+ description: Ports are not allowed for ephemeral containers.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ already allocated to the pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
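+ # Illustrative note: the quantity pattern above accepts standard
+ # Kubernetes resource strings such as cpu: "500m" or memory: "256Mi",
+ # as well as plain integers like "1". Ephemeral containers cannot set
+ # resources themselves; they reuse spare capacity already allocated to
+ # the pod.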
+ restartPolicy:
+ description: |-
+ Restart policy for the container to manage the restart behavior of each
+ container within a pod.
+ This may only be set for init containers. You cannot set this field on
+ ephemeral containers.
+ type: string
+ securityContext:
+ description: |-
+ Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is always true when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
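+ # Example (illustrative sketch; the values are assumptions, not
+ # operator defaults): a restrictive securityContext for a debug
+ # container:
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     allowPrivilegeEscalation: false
+ #     capabilities:
+ #       drop: ["ALL"]
+ #     seccompProfile:
+ #       type: RuntimeDefault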
+ startupProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicates how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
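+ # Example (illustrative): mounting an existing pod volume read-only;
+ # "pgdata" and the mount path are assumed names. Note that subPath
+ # mounts are not allowed for ephemeral containers:
+ #   volumeMounts:
+ #     - name: pgdata
+ #       mountPath: /var/lib/postgresql/data
+ #       readOnly: true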
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
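+ # Example (illustrative): an ephemeral debug container targeting an
+ # assumed "pgbouncer" container; such containers are normally injected
+ # with `kubectl debug` rather than written into a template:
+ #   ephemeralContainers:
+ #     - name: debugger
+ #       image: busybox:1.36
+ #       command: ["sh"]
+ #       stdin: true
+ #       tty: true
+ #       targetContainerName: pgbouncer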
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ required:
+ - ip
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - ip
+ x-kubernetes-list-type: map
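+ # Example (illustrative): a single hostAliases entry adding an assumed
+ # internal hostname to the pod's /etc/hosts:
+ #   hostAliases:
+ #     - ip: "10.0.0.10"
+ #       hostnames: ["metrics.internal"]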
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Defaults to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Defaults to true.
+ If set to true or not present, the pod will be run in the host user namespace. This is useful
+ when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new userns is created for the pod. Setting it to false is useful for
+ mitigating container breakout vulnerabilities, while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
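+ # Example (illustrative): referencing an assumed, pre-created registry
+ # secret so private images can be pulled:
+ #   imagePullSecrets:
+ #     - name: registry-credentials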
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
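+ # Example (illustrative, assuming a POD_NAME environment variable is
+ # defined on the container): the first reference expands, while the
+ # escaped $$(POD_NAME) is passed through as the literal $(POD_NAME):
+ #   command: ["sh", "-c"]
+ #   args: ["echo $(POD_NAME) $$(POD_NAME)"]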
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
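+ # Example (illustrative): resolving an env var from a Secret key;
+ # "app-secret" and "password" are assumed names:
+ #   env:
+ #     - name: DB_PASSWORD
+ #       valueFrom:
+ #         secretKeyRef:
+ #           name: app-secret
+ #           key: password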
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be C_IDENTIFIERs. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps or Secrets
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
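+ # Example (illustrative): importing every key of an assumed ConfigMap
+ # as environment variables, each prefixed with CFG_:
+ #   envFrom:
+ #     - prefix: CFG_
+ #       configMapRef:
+ #         name: app-config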
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if the :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents a duration that
+ the container should sleep.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents a duration that
+ the container should sleep.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when it is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
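+ # Example (illustrative sketch; per the initContainers description
+ # above, lifecycle actions are honored only for sidecar-style init
+ # containers with restartPolicy: Always): a preStop hook that delays
+ # shutdown briefly:
+ #   lifecycle:
+ #     preStop:
+ #       exec:
+ #         command: ["sh", "-c", "sleep 5"]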
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
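+ # Editor's illustration (not part of the generated schema): how a
+ # liveness probe conforming to the schema above might look in a pod
+ # template. The path and port are hypothetical placeholders.
+ #
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 5
+ #     periodSeconds: 10
+ #     failureThreshold: 3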
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
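+ # Editor's illustration: a minimal entry for the ports list above,
+ # keyed by containerPort and protocol as declared in
+ # x-kubernetes-list-map-keys. The name and number are hypothetical.
+ #
+ #   ports:
+ #   - name: metrics
+ #     containerPort: 9187
+ #     protocol: TCP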
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
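+ # Editor's illustration: requests and limits expressed with the
+ # quantity pattern validated above (plain integers or suffixed
+ # strings such as "100m" and "256Mi"). Values are hypothetical.
+ #
+ #   resources:
+ #     requests:
+ #       cpu: 100m
+ #       memory: 256Mi
+ #     limits:
+ #       memory: 256Mi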
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
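+ # Editor's illustration of the sidecar pattern described above: an
+ # init container with restartPolicy Always keeps running alongside
+ # the regular containers instead of blocking startup. The name and
+ # image are hypothetical.
+ #
+ #   initContainers:
+ #   - name: log-shipper
+ #     image: example.com/log-shipper:latest
+ #     restartPolicy: Always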
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX capability
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX capability
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
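+ # Editor's illustration: a restrictive container securityContext
+ # assembled from the fields above (a common hardening baseline, not
+ # a mandated configuration).
+ #
+ #   securityContext:
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     runAsNonRoot: true
+ #     capabilities:
+ #       drop:
+ #       - ALL
+ #     seccompProfile:
+ #       type: RuntimeDefault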
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies a command to execute
+ in the container.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies a GRPC HealthCheckRequest.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies an HTTP GET request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
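+ # Editor's illustration: a startup probe sized for a slow-starting
+ # process. With failureThreshold 30 and periodSeconds 10 the
+ # container gets up to 30 x 10 = 300 seconds to become healthy
+ # before it is restarted. The path and port are hypothetical.
+ #
+ #   startupProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     failureThreshold: 30
+ #     periodSeconds: 10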
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ nodeName:
+ description: |-
+ NodeName indicates in which node this pod is scheduled.
+ If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+ Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+ This field should not be used to express a desire for the pod to be scheduled on a specific node.
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a selector which must be true for the pod to fit on a node.
+ Selector which must match a node's labels for the pod to be scheduled on that node.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
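+ # Editor's illustration: nodeSelector is a plain map of node labels
+ # that must all match. The label key and value are hypothetical.
+ #
+ #   nodeSelector:
+ #     disktype: ssd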
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+ If the OS field is set to windows, the following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority. "system-node-critical" and
+ "system-cluster-critical" are two special keywords which indicate the
+ highest priorities with the former being the highest priority. Any other
+ name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no
+ default.
+ type: string
+ readinessGates:
+ description: |-
+ If specified, all readiness gates will be evaluated for pod readiness.
+ A pod is ready when all its containers are ready AND
+ all conditions specified in the readiness gates have status equal to "True"
+ More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ items:
+ description: PodReadinessGate contains the reference to
+ a pod condition
+ properties:
+ conditionType:
+ description: ConditionType refers to a condition in
+ the pod's condition list with matching type.
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resourceClaims:
+ description: |-
+ ResourceClaims defines which ResourceClaims must be allocated
+ and reserved before the Pod is allowed to start. The resources
+ will be made available to those containers which consume them
+ by name.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable.
+ items:
+ description: |-
+ PodResourceClaim references exactly one ResourceClaim, either directly
+ or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+ for the pod.
+
+ It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+ Containers that need access to the ResourceClaim reference it with this name.
+ properties:
+ name:
+ description: |-
+ Name uniquely identifies this resource claim inside the pod.
+ This must be a DNS_LABEL.
+ type: string
+ resourceClaimName:
+ description: |-
+ ResourceClaimName is the name of a ResourceClaim object in the same
+ namespace as this pod.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ resourceClaimTemplateName:
+ description: |-
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ object in the same namespace as this pod.
+
+ The template will be used to create a new ResourceClaim, which will
+ be bound to this pod. When this pod is deleted, the ResourceClaim
+ will also be deleted. The pod name and resource name, along with a
+ generated component, will be used to form a unique name for the
+ ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+
+ This field is immutable and no changes will be made to the
+ corresponding ResourceClaim by the control plane after creating the
+ ResourceClaim.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
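+ # Editor's illustration: one pod-level claim per entry, setting
+ # exactly one of resourceClaimName or resourceClaimTemplateName as
+ # required above. Both names are hypothetical.
+ #
+ #   resourceClaims:
+ #   - name: gpu
+ #     resourceClaimName: shared-gpu-claim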
+ resources:
+ description: |-
+ Resources is the total amount of CPU and Memory resources required by all
+ containers in the pod. It supports specifying Requests and Limits for
+ "cpu" and "memory" resource names only. ResourceClaims are not supported.
+
+ This field enables fine-grained control over resource allocation for the
+ entire pod, allowing resource sharing among containers in a pod.
+
+ This is an alpha field and requires enabling the PodLevelResources feature
+ gate.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ Restart policy for all containers within the pod.
+ One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+ Defaults to Always.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ type: string
+ runtimeClassName:
+ description: |-
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ empty definition that uses the default runtime handler.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ type: string
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified scheduler.
+ If not specified, the pod will be dispatched by default scheduler.
+ type: string
+ schedulingGates:
+ description: |-
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ scheduler will not attempt to schedule the pod.
+
+ SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+ items:
+ description: PodSchedulingGate is associated to a Pod to
+ guard its scheduling.
+ properties:
+ name:
+ description: |-
+ Name of the scheduling gate.
+ Each scheduling gate must have a unique name field.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
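+ # Editor's illustration: a pod created with a scheduling gate stays
+ # in SchedulingGated until a controller removes the gate. The gate
+ # name is hypothetical.
+ #
+ #   schedulingGates:
+ #   - name: example.com/quota-approved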
+ securityContext:
+ description: |-
+ SecurityContext holds pod-level security attributes and common container settings.
+ Optional: Defaults to empty. See type description for default values of each field.
+ properties:
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod.
+ Some volume types allow the Kubelet to change the ownership of that volume
+ to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup
+ 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: |-
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ before being exposed inside Pod. This field will only apply to
+ volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxChangePolicy:
+ description: |-
+ seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ It has no effect on nodes that do not support SELinux, or on volumes that do not support SELinux.
+ Valid values are "MountOption" and "Recursive".
+
+ "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+ "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ This requires all Pods that share the same volume to use the same SELinux label.
+ It is not possible to share the same volume among privileged and unprivileged Pods.
+ Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ CSIDriver instance. Other volumes are always re-labelled recursively.
+ "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+ If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ and "Recursive" for all other volumes.
+
+ This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+ All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: |-
+ A list of groups applied to the first process run in each container, in
+ addition to the container's primary GID and fsGroup (if specified). If
+ the SupplementalGroupsPolicy feature is enabled, the
+ supplementalGroupsPolicy field determines whether these are in addition
+ to or instead of any group memberships defined in the container image.
+ If unspecified, no additional groups are added, though group memberships
+ defined in the container image may still be used, depending on the
+ supplementalGroupsPolicy field.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ x-kubernetes-list-type: atomic
+ supplementalGroupsPolicy:
+ description: |-
+ Defines how supplemental groups of the first container processes are calculated.
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+ (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+ and the container runtime must implement support for this feature.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ sysctls:
+ description: |-
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be
+ set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options within a container's SecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
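+ # Editor's illustration: a pod-level securityContext that lets
+ # supported volumes be group-writable by the pod, as described for
+ # fsGroup above. The GID values are hypothetical.
+ #
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     fsGroup: 2000
+ #     fsGroupChangePolicy: OnRootMismatch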
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Defaults to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Defaults to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
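+ # Illustrative sketch (comment only, not part of the generated schema):
+ # a pod template tolerating a hypothetical `role=pooler:NoSchedule` taint
+ # could set, under this field:
+ #
+ #   tolerations:
+ #   - key: role
+ #     operator: Equal
+ #     value: pooler
+ #     effect: NoSchedule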
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ When the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5 (MinDomains), so the "global minimum" is treated as 0.
+ In this situation, a new pod with the same labelSelector cannot be scheduled,
+ because the computed skew will be 3 (3 - 0) if the new Pod is scheduled to any
+ of the three zones, which would violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature, enabled by default via the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature, enabled by default via the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put a balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
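+ # Illustrative sketch (comment only): spreading matching pods evenly across
+ # zones, keyed on a hypothetical `app: pooler` label, could look like:
+ #
+ #   topologySpreadConstraints:
+ #   - maxSkew: 1
+ #     topologyKey: topology.kubernetes.io/zone
+ #     whenUnsatisfiable: DoNotSchedule
+ #     labelSelector:
+ #       matchLabels:
+ #         app: pooler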
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: |-
+ azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ are redirected to the disk.csi.azure.com CSI driver.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode:
+ None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk
+ in the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in
+ the blob storage
+ type: string
+ fsType:
+ default: ext4
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared (multiple
+ blob disks per storage account), Dedicated (single
+ blob disk per storage account), or Managed (Azure
+ managed data disk, only in managed availability
+ set); defaults to Shared'
+ type: string
+ readOnly:
+ default: false
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: |-
+ azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ are redirected to the file.csi.azure.com CSI driver.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: |-
+ cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: 'path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default
+ is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: a reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ are redirected to the cinder.csi.openstack.org CSI driver.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
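+ # Illustrative sketch (comment only): projecting a single key of a
+ # hypothetical ConfigMap `pooler-config` into the volume as `pgbouncer.ini`:
+ #
+ #   volumes:
+ #   - name: config
+ #     configMap:
+ #       name: pooler-config
+ #       items:
+ #       - key: pgbouncer.ini
+ #         path: pgbouncer.ini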
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
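+ # Illustrative sketch (comment only): an inline CSI volume served by a
+ # hypothetical driver `inline.storage.example.com`; the volumeAttributes
+ # keys are driver-specific and likewise hypothetical:
+ #
+ #   volumes:
+ #   - name: scratch
+ #     csi:
+ #       driver: inline.storage.example.com
+ #       readOnly: true
+ #       volumeAttributes:
+ #         size: 1Gi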
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..'' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
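+ # Illustrative sketch (comment only): exposing the pod's labels as a file
+ # through the downward API:
+ #
+ #   volumes:
+ #   - name: podinfo
+ #     downwardAPI:
+ #       items:
+ #       - path: labels
+ #         fieldRef:
+ #           fieldPath: metadata.labels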
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
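+ # Illustrative sketch (comment only): a memory-backed scratch volume capped
+ # at 256Mi:
+ #
+ #   volumes:
+ #   - name: tmp
+ #     emptyDir:
+ #       medium: Memory
+ #       sizeLimit: 256Mi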
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespace.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName;
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
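+ # Illustrative sketch (comment only): a generic ephemeral volume provisioned
+ # from a hypothetical StorageClass `fast-local`; the PVC is created with the
+ # pod and deleted with it:
+ #
+ #   volumes:
+ #   - name: scratch
+ #     ephemeral:
+ #       volumeClaimTemplate:
+ #         spec:
+ #           accessModes: ["ReadWriteOnce"]
+ #           storageClassName: fast-local
+ #           resources:
+ #             requests:
+ #               storage: 1Gi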
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: a reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ properties:
+ datasetName:
+ description: |-
+ datasetName is the name of the dataset, stored as metadata -> name on the dataset for Flocker;
+ it should be considered deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
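+ # Illustrative sketch (comment only) of the replacement pattern named above:
+ # an emptyDir cloned by an init container instead of the deprecated gitRepo
+ # volume (image name and repository URL are hypothetical):
+ #
+ #   initContainers:
+ #   - name: clone
+ #     image: alpine/git
+ #     args: ["clone", "https://example.com/repo.git", "/repo"]
+ #     volumeMounts:
+ #     - name: repo
+ #       mountPath: /repo
+ #   volumes:
+ #   - name: repo
+ #     emptyDir: {}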
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
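+ # Illustrative sketch (comment only): mounting a host directory, failing if
+ # it does not already exist:
+ #
+ #   volumes:
+ #   - name: host-certs
+ #     hostPath:
+ #       path: /etc/ssl/certs
+ #       type: Directory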
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
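+ # Illustrative sketch (comment only): mounting an OCI artifact read-only
+ # (the reference is hypothetical):
+ #
+ #   volumes:
+ #   - name: data
+ #     image:
+ #       reference: registry.example.com/datasets/geo:v1
+ #       pullPolicy: IfNotPresent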
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
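+ # Illustrative sketch (comment only): an iSCSI volume (portal and IQN are
+ # hypothetical):
+ #
+ #   volumes:
+ #   - name: iscsi-data
+ #     iscsi:
+ #       targetPortal: 10.0.0.10:3260
+ #       iqn: iqn.2001-04.com.example:storage.disk1
+ #       lun: 0
+ #       fsType: ext4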
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
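+ # Illustrative sketch (comment only): a read-only NFS mount (server and path
+ # are hypothetical):
+ #
+ #   volumes:
+ #   - name: shared
+ #     nfs:
+ #       server: nfs.example.com
+ #       path: /exports/data
+ #       readOnly: true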
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
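+ # Illustrative sketch (comment only): referencing an existing claim (the
+ # claim name is hypothetical):
+ #
+ #   volumes:
+ #   - name: data
+ #     persistentVolumeClaim:
+ #       claimName: pooler-data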
+ photonPersistentDisk:
+ description: |-
+ photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: |-
+ portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ is on.
+ properties:
+ fsType:
+ description: |-
+ fsType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
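+ # Illustrative sketch (comment only): projecting all ClusterTrustBundles for
+ # one signer into a single file (alpha feature, as noted above; the signer
+ # name is hypothetical):
+ #
+ #   projected:
+ #     sources:
+ #     - clusterTrustBundle:
+ #         signerName: example.com/internal-ca
+ #         path: ca-bundle.pem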
+ configMap:
+ description: configMap information about the
+ configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+                            description: optional specifies whether
+ the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume
+                              files
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and uid
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the
+ schema the FieldPath is written
+ in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field
+ to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the
+ relative path name of the file
+ to be created. Must not be absolute
+ or contain the ''..'' path. Must
+ be utf-8 encoded. The first item
+ of the relative path must not
+ start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name:
+ required for volumes, optional
+ for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the
+ secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+                              description: optional field specifies whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+                              its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+                              Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ default: admin
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: |-
+ scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+ properties:
+ fsType:
+ default: xfs
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the
+ ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the
+ ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+                          secretRef references the secret for the ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description: sslEnabled Flag enable/disable SSL
+ communication with Gateway, default false
+ type: boolean
+ storageMode:
+ default: ThinProvisioned
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+                          description: optional field specifies whether the
+ Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: |-
+ storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: |-
+ vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ are redirected to the csi.vsphere.vmware.com CSI driver.
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - containers
+ type: object
+ type: object
+ type:
+ default: rw
+ description: 'Type of service to forward traffic to. Default: `rw`.'
+ enum:
+ - rw
+ - ro
+ type: string
+ required:
+ - cluster
+ - pgbouncer
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the Pooler. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ instances:
+ description: The number of pods trying to be scheduled
+ format: int32
+ type: integer
+ secrets:
+              description: The resource versions of the secrets used by the Pooler
+ properties:
+ clientCA:
+ description: The client CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ pgBouncerSecrets:
+ description: The version of the secrets used by PgBouncer
+ properties:
+ authQuery:
+ description: The auth query secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ serverCA:
+ description: The server CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ serverTLS:
+ description: The server TLS secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
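+# Illustrative only — a minimal Pooler manifest consistent with the schema
+# above. The names "pooler-example-rw" and "cluster-example" are hypothetical
+# placeholders, and `poolMode` is assumed from the PgBouncer configuration
+# section of this CRD (not shown in this hunk):
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Pooler
+#   metadata:
+#     name: pooler-example-rw
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     instances: 1
+#     type: rw
+#     pgbouncer:
+#       poolMode: session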
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: publications.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Publication
+ listKind: PublicationList
+ plural: publications
+ singular: publication
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Publication is the Schema for the publications API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PublicationSpec defines the desired state of Publication
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "publisher"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the publication will be installed in
+ the "publisher" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ name:
+ description: The name of the publication inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Publication parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE PUBLICATION` command
+ type: object
+ publicationReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this publication
+ enum:
+ - delete
+ - retain
+ type: string
+ target:
+ description: Target of the publication as expected by PostgreSQL `CREATE
+ PUBLICATION` command
+ properties:
+ allTables:
+ description: |-
+ Marks the publication as one that replicates changes for all tables
+ in the database, including tables created in the future.
+ Corresponding to `FOR ALL TABLES` in PostgreSQL.
+ type: boolean
+ x-kubernetes-validations:
+ - message: allTables is immutable
+ rule: self == oldSelf
+ objects:
+                description: Publish only the following schema objects
+ items:
+ description: PublicationTargetObject is an object to publish
+ properties:
+ table:
+ description: |-
+ Specifies a list of tables to add to the publication. Corresponding
+ to `FOR TABLE` in PostgreSQL.
+ properties:
+ columns:
+ description: The columns to publish
+ items:
+ type: string
+ type: array
+ name:
+ description: The table name
+ type: string
+ only:
+ description: Whether to limit to the table only or include
+ all its descendants
+ type: boolean
+ schema:
+ description: The schema name
+ type: string
+ required:
+ - name
+ type: object
+ tablesInSchema:
+ description: |-
+ Marks the publication as one that replicates changes for all tables
+ in the specified list of schemas, including tables created in the
+ future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL.
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: tablesInSchema and table are mutually exclusive
+ rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema)
+ && has(self.table))
+ maxItems: 100000
+ type: array
+ x-kubernetes-validations:
+ - message: specifying a column list when the publication also
+ publishes tablesInSchema is not supported
+ rule: '!(self.exists(o, has(o.table) && has(o.table.columns))
+ && self.exists(o, has(o.tablesInSchema)))'
+ type: object
+ x-kubernetes-validations:
+ - message: allTables and objects are mutually exclusive
+ rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables)
+ && has(self.objects))
+ required:
+ - cluster
+ - dbname
+ - name
+ - target
+ type: object
+ status:
+ description: PublicationStatus defines the observed state of Publication
+ properties:
+ applied:
+ description: Applied is true if the publication was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
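+# Illustrative only — a minimal Publication manifest consistent with the
+# schema above ("cluster-example" and the "app" database are hypothetical
+# placeholders). `target.allTables` and `target.objects` are mutually
+# exclusive, so exactly one of them must be set:
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Publication
+#   metadata:
+#     name: publication-example
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     dbname: app
+#     name: pub_app
+#     target:
+#       allTables: true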
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: scheduledbackups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ScheduledBackup
+ listKind: ScheduledBackupList
+ plural: scheduledbackups
+ singular: scheduledbackup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .status.lastScheduleTime
+ name: Last Backup
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ScheduledBackup is the Schema for the scheduledbackups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ScheduledBackup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ backupOwnerReference:
+ default: none
+ description: |-
+ Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: set the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+            description: Whether the first backup has to start immediately after
+              creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+              online/hot (`true`, default) or offline/cold (`false`).
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+              Configuration parameters to control the online/hot backup with volume snapshots.
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+              The schedule of the backup, in cron format with an additional leading
+              seconds specifier; it therefore does not follow the format used in
+              Kubernetes CronJobs,
+              see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+            description: Whether this scheduled backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+              description: The latest time the schedule was evaluated
+ format: date-time
+ type: string
+ lastScheduleTime:
+              description: The last time a backup was successfully
+                scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+              description: The next time a backup will run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
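+# Illustrative only — a minimal ScheduledBackup consistent with the schema
+# above. Note the six-field cron expression with the leading seconds
+# specifier; "cluster-example" is a hypothetical placeholder:
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: ScheduledBackup
+#   metadata:
+#     name: backup-example
+#   spec:
+#     schedule: "0 0 0 * * *"  # sec min hour day-of-month month day-of-week
+#     backupOwnerReference: self
+#     immediate: true
+#     cluster:
+#       name: cluster-example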
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+              The name of the database where the subscription will be installed in
+ the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE SUBSCRIPTION` command
+ type: object
+ publicationDBName:
+ description: |-
+ The name of the database containing the publication on the external
+ cluster. Defaults to the one in the external cluster definition.
+ type: string
+ publicationName:
+ description: |-
+ The name of the publication inside the PostgreSQL database in the
+ "publisher"
+ type: string
+ subscriptionReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this subscription
+ enum:
+ - delete
+ - retain
+ type: string
+ required:
+ - cluster
+ - dbname
+ - externalClusterName
+ - name
+ - publicationName
+ type: object
+ status:
+ description: SubscriptionStatus defines the observed state of Subscription
+ properties:
+ applied:
+ description: Applied is true if the subscription was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
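+# Illustrative only — a minimal Subscription consistent with the schema
+# above, subscribing to the publication "pub_app" defined on the external
+# cluster "cluster-source"; all names are hypothetical placeholders:
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Subscription
+#   metadata:
+#     name: subscription-example
+#   spec:
+#     cluster:
+#       name: cluster-dest
+#     dbname: app
+#     name: sub_app
+#     externalClusterName: cluster-source
+#     publicationName: pub_app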
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ - pods
+ - pods/exec
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - podmonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups
+ - clusters
+ - databases
+ - poolers
+ - publications
+ - scheduledbackups
+ - subscriptions
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups/status
+ - databases/status
+ - publications/status
+ - scheduledbackups/status
+ - subscriptions/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusterimagecatalogs
+ - imagecatalogs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/finalizers
+ - poolers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/status
+ - poolers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ - roles
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-publication-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-publication-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - publications/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-subscription-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-subscription-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - subscriptions/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cnpg-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cnpg-manager
+subjects:
+- kind: ServiceAccount
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: v1
+data:
+ queries: |
+ backends:
+ query: |
+ SELECT sa.datname
+ , sa.usename
+ , sa.application_name
+ , states.state
+ , COALESCE(sa.count, 0) AS total
+ , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
+ FROM ( VALUES ('active')
+ , ('idle')
+ , ('idle in transaction')
+ , ('idle in transaction (aborted)')
+ , ('fastpath function call')
+ , ('disabled')
+ ) AS states(state)
+ LEFT JOIN (
+ SELECT datname
+ , state
+ , usename
+ , COALESCE(application_name, '') AS application_name
+ , COUNT(*)
+ , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
+ FROM pg_catalog.pg_stat_activity
+ GROUP BY datname, state, usename, application_name
+ ) sa ON states.state = sa.state
+ WHERE sa.usename IS NOT NULL
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - usename:
+ usage: "LABEL"
+ description: "Name of the user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - state:
+ usage: "LABEL"
+ description: "State of the backend"
+ - total:
+ usage: "GAUGE"
+ description: "Number of backends"
+ - max_tx_duration_seconds:
+ usage: "GAUGE"
+ description: "Maximum duration of a transaction in seconds"
+
+ backends_waiting:
+ query: |
+ SELECT count(*) AS total
+ FROM pg_catalog.pg_locks blocked_locks
+ JOIN pg_catalog.pg_locks blocking_locks
+ ON blocking_locks.locktype = blocked_locks.locktype
+ AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
+ AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
+ AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
+ AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
+ AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
+ AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
+ AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
+ AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
+ AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
+ AND blocking_locks.pid != blocked_locks.pid
+ JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
+ WHERE NOT blocked_locks.granted
+ metrics:
+ - total:
+ usage: "GAUGE"
+ description: "Total number of backends that are currently waiting on other queries"
+
+ pg_database:
+ query: |
+ SELECT datname
+ , pg_catalog.pg_database_size(datname) AS size_bytes
+ , pg_catalog.age(datfrozenxid) AS xid_age
+ , pg_catalog.mxid_age(datminmxid) AS mxid_age
+ FROM pg_catalog.pg_database
+ WHERE datallowconn
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - size_bytes:
+ usage: "GAUGE"
+ description: "Disk space used by the database"
+ - xid_age:
+ usage: "GAUGE"
+ description: "Number of transactions from the frozen XID to the current one"
+ - mxid_age:
+ usage: "GAUGE"
+ description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
+
+ pg_postmaster:
+ query: |
+ SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
+ FROM pg_catalog.pg_postmaster_start_time()
+ metrics:
+ - start_time:
+ usage: "GAUGE"
+ description: "Time at which postgres started (based on epoch)"
+
+ pg_replication:
+ query: "SELECT CASE WHEN (
+ NOT pg_catalog.pg_is_in_recovery()
+ OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_catalog.pg_is_in_recovery() AS in_recovery,
+ EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
+ (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
+ metrics:
+ - lag:
+ usage: "GAUGE"
+ description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
+ - is_wal_receiver_up:
+ usage: "GAUGE"
+ description: "Whether the instance wal_receiver is up"
+ - streaming_replicas:
+ usage: "GAUGE"
+ description: "Number of streaming replicas connected to the instance"
+
+ pg_replication_slots:
+ query: |
+ SELECT slot_name,
+ slot_type,
+ database,
+ active,
+ (CASE pg_catalog.pg_is_in_recovery()
+ WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
+ ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ END) as pg_wal_lsn_diff
+ FROM pg_catalog.pg_replication_slots
+ WHERE NOT temporary
+ metrics:
+ - slot_name:
+ usage: "LABEL"
+ description: "Name of the replication slot"
+ - slot_type:
+ usage: "LABEL"
+ description: "Type of the replication slot"
+ - database:
+ usage: "LABEL"
+ description: "Name of the database"
+ - active:
+ usage: "GAUGE"
+ description: "Flag indicating whether the slot is active"
+ - pg_wal_lsn_diff:
+ usage: "GAUGE"
+ description: "Replication lag in bytes"
+
+ pg_stat_archiver:
+ query: |
+ SELECT archived_count
+ , failed_count
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
+ , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
+ , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_archiver
+ metrics:
+ - archived_count:
+ usage: "COUNTER"
+ description: "Number of WAL files that have been successfully archived"
+ - failed_count:
+ usage: "COUNTER"
+ description: "Number of failed attempts for archiving WAL files"
+ - seconds_since_last_archival:
+ usage: "GAUGE"
+ description: "Seconds since the last successful archival operation"
+ - seconds_since_last_failure:
+ usage: "GAUGE"
+ description: "Seconds since the last failed archival operation"
+ - last_archived_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving succeeded"
+ - last_failed_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving failed"
+ - last_archived_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Archived WAL start LSN"
+ - last_failed_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Last failed WAL LSN"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_bgwriter:
+ runonserver: "<17.0.0"
+ query: |
+ SELECT checkpoints_timed
+ , checkpoints_req
+ , checkpoint_write_time
+ , checkpoint_sync_time
+ , buffers_checkpoint
+ , buffers_clean
+ , maxwritten_clean
+ , buffers_backend
+ , buffers_backend_fsync
+ , buffers_alloc
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - checkpoint_write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
+ - checkpoint_sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
+ - buffers_checkpoint:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints"
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_backend:
+ usage: "COUNTER"
+ description: "Number of buffers written directly by a backend"
+ - buffers_backend_fsync:
+ usage: "COUNTER"
+ description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+
+ pg_stat_bgwriter_17:
+ runonserver: ">=17.0.0"
+ name: pg_stat_bgwriter
+ query: |
+ SELECT buffers_clean
+ , maxwritten_clean
+ , buffers_alloc
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_checkpointer:
+ runonserver: ">=17.0.0"
+ query: |
+ SELECT num_timed AS checkpoints_timed
+ , num_requested AS checkpoints_req
+ , restartpoints_timed
+ , restartpoints_req
+ , restartpoints_done
+ , write_time
+ , sync_time
+ , buffers_written
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_checkpointer
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - restartpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it"
+ - restartpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested restartpoints that have been performed"
+ - restartpoints_done:
+ usage: "COUNTER"
+ description: "Number of restartpoints that have been performed"
+ - write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds"
+ - sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds"
+ - buffers_written:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints and restartpoints"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_database:
+ query: |
+ SELECT datname
+ , xact_commit
+ , xact_rollback
+ , blks_read
+ , blks_hit
+ , tup_returned
+ , tup_fetched
+ , tup_inserted
+ , tup_updated
+ , tup_deleted
+ , conflicts
+ , temp_files
+ , temp_bytes
+ , deadlocks
+ , blk_read_time
+ , blk_write_time
+ FROM pg_catalog.pg_stat_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of this database"
+ - xact_commit:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been committed"
+ - xact_rollback:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been rolled back"
+ - blks_read:
+ usage: "COUNTER"
+ description: "Number of disk blocks read in this database"
+ - blks_hit:
+ usage: "COUNTER"
+ description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
+ - tup_returned:
+ usage: "COUNTER"
+ description: "Number of rows returned by queries in this database"
+ - tup_fetched:
+ usage: "COUNTER"
+ description: "Number of rows fetched by queries in this database"
+ - tup_inserted:
+ usage: "COUNTER"
+ description: "Number of rows inserted by queries in this database"
+ - tup_updated:
+ usage: "COUNTER"
+ description: "Number of rows updated by queries in this database"
+ - tup_deleted:
+ usage: "COUNTER"
+ description: "Number of rows deleted by queries in this database"
+ - conflicts:
+ usage: "COUNTER"
+ description: "Number of queries canceled due to conflicts with recovery in this database"
+ - temp_files:
+ usage: "COUNTER"
+ description: "Number of temporary files created by queries in this database"
+ - temp_bytes:
+ usage: "COUNTER"
+ description: "Total amount of data written to temporary files by queries in this database"
+ - deadlocks:
+ usage: "COUNTER"
+ description: "Number of deadlocks detected in this database"
+ - blk_read_time:
+ usage: "COUNTER"
+ description: "Time spent reading data file blocks by backends in this database, in milliseconds"
+ - blk_write_time:
+ usage: "COUNTER"
+ description: "Time spent writing data file blocks by backends in this database, in milliseconds"
+
+ pg_stat_replication:
+ primary: true
+ query: |
+ SELECT usename
+ , COALESCE(application_name, '') AS application_name
+ , COALESCE(client_addr::text, '') AS client_addr
+ , COALESCE(client_port::text, '') AS client_port
+ , EXTRACT(EPOCH FROM backend_start) AS backend_start
+ , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes
+ , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes
+ , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds
+ FROM pg_catalog.pg_stat_replication
+ metrics:
+ - usename:
+ usage: "LABEL"
+ description: "Name of the replication user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - client_addr:
+ usage: "LABEL"
+ description: "Client IP address"
+ - client_port:
+ usage: "LABEL"
+ description: "Client TCP port"
+ - backend_start:
+ usage: "COUNTER"
+ description: "Time when this process was started"
+ - backend_xmin_age:
+ usage: "COUNTER"
+ description: "The age of this standby's xmin horizon"
+ - sent_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location sent on this connection"
+ - write_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location written to disk by this standby server"
+ - flush_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server"
+ - replay_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server"
+ - write_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it"
+ - flush_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it"
+ - replay_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it"
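+      # Note: the *_lag columns are NULL whenever a standby is fully caught up,
+      # which is why the query above COALESCEs them to 0.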
+
+ pg_settings:
+ query: |
+ SELECT name,
+ CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
+ FROM pg_catalog.pg_settings
+ WHERE vartype IN ('integer', 'real', 'bool')
+ ORDER BY 1
+ metrics:
+ - name:
+ usage: "LABEL"
+ description: "Name of the setting"
+ - setting:
+ usage: "GAUGE"
+ description: "Setting value"
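+      # Note: only integer, real and bool GUCs are selected above; booleans are
+      # mapped to 1/0 by the CASE expression so they can be exported as gauges.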
+kind: ConfigMap
+metadata:
+ labels:
+ cnpg.io/reload: ""
+ name: cnpg-default-monitoring
+ namespace: cnpg-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ app.kubernetes.io/name: cloudnative-pg
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-controller-manager
+ namespace: cnpg-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: cloudnative-pg
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ spec:
+ containers:
+ - args:
+ - controller
+ - --leader-elect
+ - --max-concurrent-reconciles=10
+ - --config-map-name=cnpg-controller-manager-config
+ - --secret-name=cnpg-controller-manager-config
+ - --webhook-port=9443
+ command:
+ - /manager
+ env:
+ - name: OPERATOR_IMAGE_NAME
+ value: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MONITORING_QUERIES_CONFIGMAP
+ value: cnpg-default-monitoring
+ image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0
+ livenessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsGroup: 10001
+ runAsUser: 10001
+ seccompProfile:
+ type: RuntimeDefault
+ volumeMounts:
+ - mountPath: /controller
+ name: scratch-data
+ - mountPath: /run/secrets/cnpg.io/webhook
+ name: webhook-certificates
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ serviceAccountName: cnpg-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - emptyDir: {}
+ name: scratch-data
+ - name: webhook-certificates
+ secret:
+ defaultMode: 420
+ optional: true
+ secretName: cnpg-webhook-cert
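+          # Marked optional: when the secret is absent, the operator is
+          # expected to provision the webhook certificate itself at startup.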
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: cnpg-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: mbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: mcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: mscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: cnpg-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: vbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: vcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-pooler
+ failurePolicy: Fail
+ name: vpooler.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - poolers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: vscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
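+# Note: every webhook above uses failurePolicy: Fail, so the API server
+# rejects CREATE/UPDATE requests for these resources while the webhook
+# service is unreachable; the operator must be healthy for writes to pass.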
From e947e3204c7029489ecc346ed2a73bbf59e52b65 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Tue, 24 Dec 2024 10:36:47 +0100
Subject: [PATCH 267/836] chore: update issue templates and backport & CI/CD
workflows (#6445)
Closes #6444
Signed-off-by: Gabriele Bartolini
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Leonardo Cecchi
Co-authored-by: Jonathan Gonzalez V.
Co-authored-by: Leonardo Cecchi
---
.github/ISSUE_TEMPLATE/bug.yml | 9 ++++-----
.github/renovate.json5 | 2 +-
.github/workflows/backport.yml | 6 +++---
.github/workflows/continuous-delivery.yml | 2 +-
.github/workflows/continuous-integration.yml | 2 +-
contribute/release_procedure.md | 6 ++++--
6 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index d02633e40a..354cf7cb17 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -48,10 +48,10 @@ body:
label: Version
description: What is the version of CloudNativePG you are running?
options:
- - "1.24.0"
- - "1.23.4"
+ - "1.25 (latest patch)"
+ - "1.24 (latest patch)"
- "trunk (main)"
- - "older in 1.23.x"
+ - "older in 1.24.x"
- "older minor (unsupported)"
validations:
required: true
@@ -60,11 +60,10 @@ body:
attributes:
label: What version of Kubernetes are you using?
options:
+ - "1.32"
- "1.31"
- "1.30"
- "1.29"
- - "1.28"
- - "1.27 (unsupported)"
- "other (unsupported)"
validations:
required: true
diff --git a/.github/renovate.json5 b/.github/renovate.json5
index 00735afaea..af007aa499 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -8,8 +8,8 @@
baseBranches: [
'main',
'release-1.22',
- 'release-1.23',
'release-1.24',
+ 'release-1.25'
],
ignorePaths: [
'docs/**',
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 7cdf956314..1a1f7a13c4 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -32,8 +32,8 @@ jobs:
labels: |
backport-requested :arrow_backward:
release-1.22
- release-1.23
release-1.24
+ release-1.25
-
name: Create comment
uses: peter-evans/create-or-update-comment@v4
@@ -56,8 +56,8 @@ jobs:
labels: |
backport-requested :arrow_backward:
release-1.22
- release-1.23
release-1.24
+ release-1.25
## backport the pull request when it carries the 'backport-requested' label and the target branch labels
back-porting-pr:
@@ -73,7 +73,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- branch: [release-1.22, release-1.23, release-1.24]
+ branch: [release-1.22, release-1.24]
env:
PR: ${{ github.event.pull_request.number }}
outputs:
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 7ce21a1d15..639cb9b9c4 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -69,7 +69,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- branch: [release-1.22, release-1.23, release-1.24]
+ branch: [release-1.22, release-1.24, release-1.25]
steps:
- name: Invoke workflow with inputs
uses: benc-uk/workflow-dispatch@v1
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 5b15566bba..b57299a5ef 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -52,7 +52,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- branch: [release-1.22, release-1.23, release-1.24]
+ branch: [release-1.22, release-1.24, release-1.25]
steps:
- name: Invoke workflow with inputs
diff --git a/contribute/release_procedure.md b/contribute/release_procedure.md
index 15aa72df43..18dd0270bf 100644
--- a/contribute/release_procedure.md
+++ b/contribute/release_procedure.md
@@ -144,8 +144,10 @@ This procedure must happen immediately before starting the release.
**IMPORTANT:** Now we add support for the automatic backporting of merged pull requests from main to the new release branch.
Once the new release branch is created, go back to `main` and submit a pull
request to update the
-[backport](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/backport.yml)
-and [continuous delivery](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-delivery.yml)
+[backport](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/backport.yml),
+[continuous delivery](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-delivery.yml),
+[continuous integration](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-integration.yml)
+and [Renovate](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/renovate.json5)
workflows to support the new release branch.
And also remember to update the [github issue template](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/ISSUE_TEMPLATE/bug.yml).
From 482dd0f2f0d31704985570c82a9b83daeeb09e5f Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 26 Dec 2024 11:44:06 +0100
Subject: [PATCH 268/836] chore(deps): update helm/kind-action action to
v1.12.0 (main) (#6428)
---
.github/workflows/continuous-integration.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index b57299a5ef..4e52d12902 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -829,7 +829,7 @@ jobs:
uses: actions/checkout@v4
- name: Setting up KinD cluster
- uses: helm/kind-action@v1.11.0
+ uses: helm/kind-action@v1.12.0
with:
wait: "600s"
version: ${{ env.KIND_VERSION }}
From cbb5977a2edea8d61b3c6ba30e2f1c07e39bbff6 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 27 Dec 2024 11:40:44 +0100
Subject: [PATCH 269/836] fix(deps): update module github.com/onsi/gomega to
v1.36.2 (main) (#6461)
---
go.mod | 6 +++---
go.sum | 12 ++++++------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/go.mod b/go.mod
index 78f66c0b64..c01aed9916 100644
--- a/go.mod
+++ b/go.mod
@@ -26,7 +26,7 @@ require (
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
github.com/onsi/ginkgo/v2 v2.22.1
- github.com/onsi/gomega v1.36.1
+ github.com/onsi/gomega v1.36.2
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
@@ -101,7 +101,7 @@ require (
github.com/xlab/treeprint v1.2.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
- golang.org/x/net v0.32.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
@@ -110,7 +110,7 @@ require (
golang.org/x/tools v0.28.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
- google.golang.org/protobuf v1.36.0 // indirect
+ google.golang.org/protobuf v1.36.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
diff --git a/go.sum b/go.sum
index 8c96643b6a..10ec5d29be 100644
--- a/go.sum
+++ b/go.sum
@@ -144,8 +144,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
-github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
-github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -225,8 +225,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
-golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -266,8 +266,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
-google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
-google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
From fda31256fbc92d706d9210e88cbce26fd02ad119 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Tue, 31 Dec 2024 08:47:39 +0100
Subject: [PATCH 270/836] test(e2e): raise `AssertClusterStandbysAreStreaming`
timeout (#6455)
Closes #6398
Signed-off-by: Armando Ruocco
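For reference, the assertion's last argument is the timeout (in seconds, as
used elsewhere in this suite), so each call site below simply moves from a
120s to a 140s wait for the standbys to start streaming:

    AssertClusterStandbysAreStreaming(namespace, clusterName, 140)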
---
tests/e2e/asserts_test.go | 6 +++---
tests/e2e/cluster_microservice_test.go | 2 +-
tests/e2e/drain_node_test.go | 6 +++---
tests/e2e/fencing_test.go | 2 +-
tests/e2e/pg_data_corruption_test.go | 2 +-
5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index ccf73d21a9..1621205e04 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -1575,7 +1575,7 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
})
// Restored standby should be attached to restored primary
- AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140)
// Gather Credentials
appUser, appUserPass, err := testsUtils.GetCredentials(restoredClusterName, namespace,
@@ -1638,7 +1638,7 @@ func AssertClusterRestore(namespace, restoreClusterFile, tableName string) {
Expect(strings.Trim(out, "\n"), err).To(Equal("00000002"))
// Restored standby should be attached to restored primary
- AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140)
})
}
@@ -1655,7 +1655,7 @@ func AssertClusterImport(namespace, clusterWithExternalClusterName, clusterName,
AssertClusterIsReady(namespace, clusterWithExternalClusterName,
testTimeouts[testsUtils.ClusterIsReadySlow], env)
// Restored standby should be attached to restored primary
- AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 140)
})
return cluster
}
diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go
index d957e1976f..476ea0e4aa 100644
--- a/tests/e2e/cluster_microservice_test.go
+++ b/tests/e2e/cluster_microservice_test.go
@@ -316,7 +316,7 @@ func assertImportRenamesSelectedDatabase(
Expect(err).ToNot(HaveOccurred())
// We give more time than the usual 600s, since the recovery is slower
AssertClusterIsReady(namespace, importedClusterName, 1000, env)
- AssertClusterStandbysAreStreaming(namespace, importedClusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, importedClusterName, 140)
})
tableLocator := TableLocator{
diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go
index b018065c1b..d3ac7f8907 100644
--- a/tests/e2e/drain_node_test.go
+++ b/tests/e2e/drain_node_test.go
@@ -185,7 +185,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
})
AssertDataExpectedCount(env, tableLocator, 2)
- AssertClusterStandbysAreStreaming(namespace, clusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, clusterName, 140)
})
// Scenario: all the pods of a cluster are on a single node and another schedulable node exists.
@@ -304,7 +304,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
})
AssertDataExpectedCount(env, tableLocator, 2)
- AssertClusterStandbysAreStreaming(namespace, clusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, clusterName, 140)
})
})
})
@@ -409,7 +409,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
})
AssertDataExpectedCount(env, tableLocator, 2)
- AssertClusterStandbysAreStreaming(namespace, clusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, clusterName, 140)
err = nodes.UncordonAllNodes(env)
Expect(err).ToNot(HaveOccurred())
})
diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go
index 4644a4b3ab..a43e7a4191 100644
--- a/tests/e2e/fencing_test.go
+++ b/tests/e2e/fencing_test.go
@@ -158,7 +158,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
Expect(beforeFencingPodName).Should(BeEquivalentTo(currentPrimaryPodInfo.GetName()))
})
By("all followers should be streaming again from the primary instance", func() {
- AssertClusterStandbysAreStreaming(namespace, clusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, clusterName, 140)
})
checkFencingAnnotationSet(fencingMethod, nil)
})
diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go
index c8c6fbe321..c0672f4479 100644
--- a/tests/e2e/pg_data_corruption_test.go
+++ b/tests/e2e/pg_data_corruption_test.go
@@ -194,7 +194,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
})
AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
AssertDataExpectedCount(env, tableLocator, 2)
- AssertClusterStandbysAreStreaming(namespace, clusterName, 120)
+ AssertClusterStandbysAreStreaming(namespace, clusterName, 140)
}
Context("plain cluster", func() {
From 6d0f57e72389c78110d8f53eba806679d8dece40 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 1 Jan 2025 18:56:25 +0100
Subject: [PATCH 271/836] fix(deps): update github.com/cloudnative-pg/cnpg-i
digest to 7e2cfa5 (main) (#6451)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index c01aed9916..7b94c93e14 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
- github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee
+ github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc
github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
diff --git a/go.sum b/go.sum
index 10ec5d29be..eaa9073eca 100644
--- a/go.sum
+++ b/go.sum
@@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr
github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU=
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee h1:PJc4BpPu0b684BrwWzy0B5W/CSqrnUV+jv3PTrSUx8g=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee/go.mod h1:ahVFn+JzYkFfv7Iwpswu4lsuC9yK7zZupM1ssaIKPFI=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY=
github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d h1:v9IgiRYa7r+KCUxl5lCyUXdhsefZ90engPSMNLBqYmc=
github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
From 8620d2d86bd114262b80b62860e45d77ed2f6859 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 1 Jan 2025 20:04:06 +0100
Subject: [PATCH 272/836] fix(deps): update github.com/cloudnative-pg/machinery
digest to 66cd032 (main) (#6449)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 7b94c93e14..5efa611133 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc
- github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d
+ github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
diff --git a/go.sum b/go.sum
index eaa9073eca..79dc98c475 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrE
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY=
-github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d h1:v9IgiRYa7r+KCUxl5lCyUXdhsefZ90engPSMNLBqYmc=
-github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU=
+github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607 h1:Jymgt/H6iNoUZCqF6YtOqE2GgQIM1e1tWjT42B6vPJs=
+github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607/go.mod h1:n6br6GuNXcwYI5SgRArt9rM2hgZ1ElZr4vkJCWfiC/U=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
From 6c8afc4275ee439cefb44217ae0d71b5bdcb715a Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 2 Jan 2025 09:22:48 +0100
Subject: [PATCH 273/836] fix(deps): update module github.com/onsi/ginkgo/v2 to
v2.22.2 (main) (#6481)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 5efa611133..8ddc9591f2 100644
--- a/go.mod
+++ b/go.mod
@@ -25,7 +25,7 @@ require (
github.com/lib/pq v1.10.9
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
- github.com/onsi/ginkgo/v2 v2.22.1
+ github.com/onsi/ginkgo/v2 v2.22.2
github.com/onsi/gomega v1.36.2
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
github.com/prometheus/client_golang v1.20.5
diff --git a/go.sum b/go.sum
index 79dc98c475..22f177486f 100644
--- a/go.sum
+++ b/go.sum
@@ -142,8 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
-github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
+github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
+github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
From 759760a7064ff239de7d59c1e7339bf028631216 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 2 Jan 2025 10:56:41 +0100
Subject: [PATCH 274/836] fix: linter errors for golangci-lint 1.63.1 (#6489)
The new version of golangci-lint (1.63.1) detected an issue that we
should fix.
Closes #6488
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Leonardo Cecchi
Co-authored-by: Leonardo Cecchi
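For context, the new complaint appears to target the unchecked type assertion
in the old expectation; a minimal before/after sketch of the pattern, taken
from the diff below:

    // before: err.(*ErrMissingPod) panics if err holds a different concrete
    // type, so the expectation could never fail cleanly
    Expect(err).To(HaveOccurred())
    Expect(err.(*ErrMissingPod)).ToNot(BeNil())

    // after: assert on the error message instead, with no type assertion
    Expect(err).To(MatchError((&ErrMissingPod{
        role: "primary",
    }).Error()))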
---
internal/cmd/plugin/psql/psql_test.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go
index 682705e76b..22058e5d8b 100644
--- a/internal/cmd/plugin/psql/psql_test.go
+++ b/internal/cmd/plugin/psql/psql_test.go
@@ -69,8 +69,9 @@ var _ = Describe("psql launcher", func() {
}
_, err := cmd.getPodName()
- Expect(err).To(HaveOccurred())
- Expect(err.(*ErrMissingPod)).ToNot(BeNil())
+ Expect(err).To(MatchError((&ErrMissingPod{
+ role: "primary",
+ }).Error()))
})
It("correctly composes a kubectl exec command line", func() {
From b16ceef20b058204bb14f9c5db5ab0cde725c2ba Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 2 Jan 2025 11:22:44 +0100
Subject: [PATCH 275/836] fix: add missing release-1.25 branch to backport
workflow (#6492)
Signed-off-by: Jonathan Gonzalez V.
---
.github/workflows/backport.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 1a1f7a13c4..37b2e1ec97 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -73,7 +73,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- branch: [release-1.22, release-1.24]
+ branch: [release-1.22, release-1.24, release-1.25]
env:
PR: ${{ github.event.pull_request.number }}
outputs:
From ebf36a59e54b5634b56ee6058d1b7551eafde083 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 2 Jan 2025 13:58:05 +0100
Subject: [PATCH 276/836] test(e2e): split e2e utils into packages (#5907)
This patch classifies the E2E utility functions into categories, creates
a package per category, and moves each function into its corresponding
package.
While doing that, it removes spurious dependencies on the `env` package.
The following is the list of packages created:
* `backups`
* `cloudvendors`
* `deployments`
* `environment`
* `envsubst`
* `fencing`
* `importdb`
* `logs`
* `nodes`
* `openshift`
* `operator`
* `postgres`
* `proxy`
* `run`
* `secrets`
* `services`
* `storage`
* `timeouts`
* `yaml`
Closes #6453
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Marco Nenciarini
Signed-off-by: Francesco Canovai
Signed-off-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
Co-authored-by: Francesco Canovai
Co-authored-by: Armando Ruocco
Co-authored-by: Gabriele Quaresima
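To illustrate the resulting call pattern (a representative sketch; the actual
changes are in the diff below), helpers move from methods on the testing
environment to plain functions in the new packages, which receive the context
and clients explicitly:

    import (
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
    )

    // before: cluster, err := env.GetCluster(namespace, clusterName)
    cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)

    // before: _, _, err := utils.Run(cmd)
    _, _, err = run.Run(cmd)

Note the Makefile now tests ./tests/utils/... (instead of ./tests/utils) so
that `go test` also walks the new sub-packages.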
---
Makefile | 2 +-
tests/e2e/affinity_test.go | 6 +-
tests/e2e/apparmor_test.go | 5 +-
tests/e2e/architecture_test.go | 14 +-
tests/e2e/asserts_test.go | 708 +++++++++++-------
tests/e2e/backup_restore_azure_test.go | 262 ++++---
tests/e2e/backup_restore_azurite_test.go | 99 ++-
tests/e2e/backup_restore_minio_test.go | 207 +++--
tests/e2e/certificates_test.go | 297 +++++---
tests/e2e/cluster_microservice_test.go | 84 ++-
tests/e2e/cluster_monolithic_test.go | 45 +-
tests/e2e/cluster_setup_test.go | 32 +-
tests/e2e/commons_test.go | 18 +-
tests/e2e/config_support_test.go | 36 +-
tests/e2e/configuration_update_test.go | 167 +++--
tests/e2e/connection_test.go | 12 +-
.../declarative_database_management_test.go | 36 +-
tests/e2e/declarative_hibernation_test.go | 23 +-
tests/e2e/disk_space_test.go | 54 +-
tests/e2e/drain_node_test.go | 122 +--
tests/e2e/eviction_test.go | 47 +-
tests/e2e/failover_test.go | 87 ++-
tests/e2e/fastfailover_test.go | 6 +-
tests/e2e/fastswitchover_test.go | 28 +-
tests/e2e/fencing_test.go | 87 ++-
tests/e2e/hibernation_test.go | 53 +-
tests/e2e/initdb_test.go | 30 +-
tests/e2e/logs_test.go | 59 +-
tests/e2e/managed_roles_test.go | 144 ++--
tests/e2e/managed_services_test.go | 42 +-
tests/e2e/metrics_test.go | 74 +-
tests/e2e/monitoring_test.go | 29 +-
tests/e2e/nodeselector_test.go | 16 +-
tests/e2e/openshift_upgrade_test.go | 60 +-
tests/e2e/operator_deployment_test.go | 5 +-
tests/e2e/operator_ha_test.go | 44 +-
tests/e2e/operator_unavailable_test.go | 28 +-
tests/e2e/pg_basebackup_test.go | 48 +-
tests/e2e/pg_data_corruption_test.go | 30 +-
tests/e2e/pg_wal_volume_test.go | 19 +-
tests/e2e/pgbouncer_metrics_test.go | 11 +-
tests/e2e/pgbouncer_test.go | 24 +-
tests/e2e/pgbouncer_types_test.go | 19 +-
tests/e2e/probes_test.go | 8 +-
tests/e2e/publication_subscription_test.go | 80 +-
tests/e2e/pvc_deletion_test.go | 14 +-
tests/e2e/replica_mode_cluster_test.go | 191 +++--
tests/e2e/replication_slot_test.go | 53 +-
tests/e2e/rolling_update_test.go | 108 +--
tests/e2e/scaling_test.go | 14 +-
tests/e2e/storage_expansion_test.go | 19 +-
tests/e2e/suite_test.go | 45 +-
tests/e2e/switchover_test.go | 9 +-
tests/e2e/syncreplicas_test.go | 78 +-
tests/e2e/tablespaces_test.go | 422 +++++++----
tests/e2e/tolerations_test.go | 11 +-
tests/e2e/update_user_test.go | 54 +-
tests/e2e/upgrade_test.go | 145 ++--
tests/e2e/volume_snapshot_test.go | 224 +++---
tests/e2e/wal_restore_parallel_test.go | 59 +-
tests/e2e/webhook_test.go | 25 +-
tests/levels.go | 6 +-
tests/utils/{ => backups}/azurite.go | 90 ++-
tests/utils/{ => backups}/backup.go | 311 ++++----
tests/utils/backups/doc.go | 18 +
tests/utils/certificates.go | 51 --
.../utils/{ => cloudvendors}/cloud_vendor.go | 3 +-
tests/utils/cluster.go | 409 ----------
tests/utils/clusterutils/cluster.go | 227 ++++++
tests/utils/commons.go | 133 ----
tests/utils/{ => deployments}/deployment.go | 21 +-
tests/utils/doc.go | 18 +
tests/utils/environment/doc.go | 18 +
tests/utils/{ => environment}/environment.go | 145 +---
.../environment_test.go} | 2 +-
.../{job.go => environment/suite_test.go} | 26 +-
tests/utils/envsubst/doc.go | 18 +
tests/utils/{ => envsubst}/envsubst.go | 2 +-
tests/utils/{ => envsubst}/envsubst_test.go | 2 +-
.../{lease.go => envsubst/suite_test.go} | 18 +-
tests/utils/exec/exec.go | 156 ++++
tests/utils/{fence.go => fencing/fencing.go} | 44 +-
tests/utils/hibernate.go | 103 ---
tests/utils/{ => importdb}/import_db.go | 30 +-
tests/utils/logs/doc.go | 18 +
tests/utils/{ => logs}/logs.go | 15 +-
tests/utils/{ => logs}/logs_test.go | 2 +-
tests/utils/{ => logs}/suite_test.go | 4 +-
tests/utils/minio/minio.go | 31 +-
tests/utils/namespace.go | 217 ------
tests/utils/namespaces/namespace.go | 377 ++++++++++
tests/utils/nodes/{drain.go => nodes.go} | 65 +-
tests/utils/objects/objects.go | 117 +++
tests/utils/{ => openshift}/openshift.go | 88 ++-
tests/utils/operator/doc.go | 18 +
tests/utils/{ => operator}/operator.go | 156 ++--
tests/utils/{ => operator}/release.go | 3 +-
tests/utils/{ => operator}/release_test.go | 8 +-
.../{monitoring.go => operator/suite_test.go} | 24 +-
tests/utils/{ => operator}/upgrade.go | 38 +-
tests/utils/{ => operator}/webhooks.go | 74 +-
tests/utils/pod.go | 273 -------
tests/utils/pods/pod.go | 194 +++++
tests/utils/postgres.go | 57 --
tests/utils/postgres/doc.go | 18 +
tests/utils/postgres/postgres.go | 133 ++++
.../postgres_test.go} | 2 +-
tests/utils/{ => postgres}/psql_connection.go | 64 +-
tests/utils/postgres/suite_test.go | 29 +
tests/utils/{ => proxy}/proxy.go | 33 +-
.../replication_slots.go | 95 ++-
tests/utils/{ => run}/run.go | 31 +-
tests/utils/{ => secrets}/secrets.go | 40 +-
tests/utils/{ => services}/service.go | 37 +-
tests/utils/{ => storage}/storage.go | 47 +-
tests/utils/time.go | 43 --
tests/utils/{ => timeouts}/timeouts.go | 3 +-
tests/utils/utils.go | 170 +++++
tests/utils/version.go | 51 --
tests/utils/webapp.go | 88 ---
tests/utils/{ => yaml}/yaml.go | 39 +-
121 files changed, 5313 insertions(+), 4018 deletions(-)
rename tests/utils/{ => backups}/azurite.go (88%)
rename tests/utils/{ => backups}/backup.go (67%)
create mode 100644 tests/utils/backups/doc.go
delete mode 100644 tests/utils/certificates.go
rename tests/utils/{ => cloudvendors}/cloud_vendor.go (96%)
delete mode 100644 tests/utils/cluster.go
create mode 100644 tests/utils/clusterutils/cluster.go
delete mode 100644 tests/utils/commons.go
rename tests/utils/{ => deployments}/deployment.go (72%)
create mode 100644 tests/utils/doc.go
create mode 100644 tests/utils/environment/doc.go
rename tests/utils/{ => environment}/environment.go (50%)
rename tests/utils/{namespace_test.go => environment/environment_test.go} (98%)
rename tests/utils/{job.go => environment/suite_test.go} (52%)
create mode 100644 tests/utils/envsubst/doc.go
rename tests/utils/{ => envsubst}/envsubst.go (99%)
rename tests/utils/{ => envsubst}/envsubst_test.go (99%)
rename tests/utils/{lease.go => envsubst/suite_test.go} (51%)
create mode 100644 tests/utils/exec/exec.go
rename tests/utils/{fence.go => fencing/fencing.go} (61%)
delete mode 100644 tests/utils/hibernate.go
rename tests/utils/{ => importdb}/import_db.go (84%)
create mode 100644 tests/utils/logs/doc.go
rename tests/utils/{ => logs}/logs.go (93%)
rename tests/utils/{ => logs}/logs_test.go (99%)
rename tests/utils/{ => logs}/suite_test.go (93%)
delete mode 100644 tests/utils/namespace.go
create mode 100644 tests/utils/namespaces/namespace.go
rename tests/utils/nodes/{drain.go => nodes.go} (54%)
create mode 100644 tests/utils/objects/objects.go
rename tests/utils/{ => openshift}/openshift.go (70%)
create mode 100644 tests/utils/operator/doc.go
rename tests/utils/{ => operator}/operator.go (59%)
rename tests/utils/{ => operator}/release.go (97%)
rename tests/utils/{ => operator}/release_test.go (95%)
rename tests/utils/{monitoring.go => operator/suite_test.go} (51%)
rename tests/utils/{ => operator}/upgrade.go (70%)
rename tests/utils/{ => operator}/webhooks.go (65%)
delete mode 100644 tests/utils/pod.go
create mode 100644 tests/utils/pods/pod.go
delete mode 100644 tests/utils/postgres.go
create mode 100644 tests/utils/postgres/doc.go
create mode 100644 tests/utils/postgres/postgres.go
rename tests/utils/{version_test.go => postgres/postgres_test.go} (98%)
rename tests/utils/{ => postgres}/psql_connection.go (79%)
create mode 100644 tests/utils/postgres/suite_test.go
rename tests/utils/{ => proxy}/proxy.go (65%)
rename tests/utils/{ => replicationslot}/replication_slots.go (69%)
rename tests/utils/{ => run}/run.go (71%)
rename tests/utils/{ => secrets}/secrets.go (77%)
rename tests/utils/{ => services}/service.go (72%)
rename tests/utils/{ => storage}/storage.go (73%)
delete mode 100644 tests/utils/time.go
rename tests/utils/{ => timeouts}/timeouts.go (97%)
create mode 100644 tests/utils/utils.go
delete mode 100644 tests/utils/version.go
delete mode 100644 tests/utils/webapp.go
rename tests/utils/{ => yaml}/yaml.go (59%)
diff --git a/Makefile b/Makefile
index a80d95dfb7..0ebe229185 100644
--- a/Makefile
+++ b/Makefile
@@ -101,7 +101,7 @@ test: generate fmt vet manifests envtest ## Run tests.
source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\
export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=60s ;\
export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=60s ;\
- go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils
+ go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils/...
test-race: generate fmt vet manifests envtest ## Run tests enabling race detection.
mkdir -p ${ENVTEST_ASSETS_DIR} ;\
diff --git a/tests/e2e/affinity_test.go b/tests/e2e/affinity_test.go
index 3a74391ac2..69d2ce92a8 100644
--- a/tests/e2e/affinity_test.go
+++ b/tests/e2e/affinity_test.go
@@ -20,7 +20,7 @@ import (
"fmt"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -44,13 +44,13 @@ var _ = Describe("E2E Affinity", Serial, Label(tests.LabelPodScheduling), func()
})
It("can create a cluster and a pooler with required affinity", func() {
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterFile, env)
createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerFile, 3)
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
AssertClusterIsReady(namespace, clusterName, 300, env)
})
diff --git a/tests/e2e/apparmor_test.go b/tests/e2e/apparmor_test.go
index 70ecabeeff..c38a37401c 100644
--- a/tests/e2e/apparmor_test.go
+++ b/tests/e2e/apparmor_test.go
@@ -22,6 +22,7 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -48,14 +49,14 @@ var _ = Describe("AppArmor support", Serial, Label(tests.LabelNoOpenshift, tests
})
It("sets up a cluster enabling AppArmor annotation feature", func() {
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterAppArmorFile, env)
By("verifying AppArmor annotations on cluster and pods", func() {
// Gathers the pod list using annotations
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
for _, pod := range podList.Items {
annotation := pod.ObjectMeta.Annotations[pkgutils.AppArmorAnnotationPrefix+"/"+specs.PostgresContainerName]
Expect(annotation).ShouldNot(BeEmpty(),
diff --git a/tests/e2e/architecture_test.go b/tests/e2e/architecture_test.go
index 34222b58cb..4aeb3992b3 100644
--- a/tests/e2e/architecture_test.go
+++ b/tests/e2e/architecture_test.go
@@ -19,7 +19,9 @@ package e2e
import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -74,21 +76,21 @@ var _ = Describe("Available Architectures", Label(tests.LabelBasic), func() {
var err error
It("manages each available architecture", func() {
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
// Fetch the operator's available architectures
- operatorPod, err := env.GetOperatorPod()
+ operatorPod, err := operator.GetPod(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
- imageArchitectures, err := utils.GetOperatorArchitectures(&operatorPod)
+ imageArchitectures, err := operator.Architectures(&operatorPod)
Expect(err).ToNot(HaveOccurred())
// Fetch the Cluster status
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
archStatus := cluster.Status.AvailableArchitectures
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 1621205e04..97c5758b96 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
+ "context"
"database/sql"
"errors"
"fmt"
@@ -34,6 +35,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/utils/strings/slices"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -43,17 +45,37 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/envsubst"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes"
+ objectsutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
-func AssertSwitchover(namespace string, clusterName string, env *testsUtils.TestingEnvironment) {
+func AssertSwitchover(namespace string, clusterName string, env *environment.TestingEnvironment) {
AssertSwitchoverWithHistory(namespace, clusterName, false, env)
}
-func AssertSwitchoverOnReplica(namespace string, clusterName string, env *testsUtils.TestingEnvironment) {
+func AssertSwitchoverOnReplica(namespace string, clusterName string, env *environment.TestingEnvironment) {
AssertSwitchoverWithHistory(namespace, clusterName, true, env)
}
@@ -65,7 +87,7 @@ func AssertSwitchoverWithHistory(
namespace string,
clusterName string,
isReplica bool,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
var pods []string
var oldPrimary, targetPrimary string
@@ -77,7 +99,7 @@ func AssertSwitchoverWithHistory(
Eventually(func(g Gomega) {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(cluster.Status.CurrentPrimary, err).To(
BeEquivalentTo(cluster.Status.TargetPrimary),
@@ -87,7 +109,7 @@ func AssertSwitchoverWithHistory(
oldPrimary = cluster.Status.CurrentPrimary
// Gather pod names
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
oldPodListLength = len(podList.Items)
for _, p := range podList.Items {
@@ -102,7 +124,7 @@ func AssertSwitchoverWithHistory(
By(fmt.Sprintf("setting the TargetPrimary node to trigger a switchover to %s", targetPrimary), func() {
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
cluster.Status.TargetPrimary = targetPrimary
return env.Client.Status().Update(env.Ctx, cluster)
@@ -112,10 +134,10 @@ func AssertSwitchoverWithHistory(
By("waiting that the TargetPrimary become also CurrentPrimary", func() {
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.CurrentPrimary, err
- }, testTimeouts[testsUtils.NewPrimaryAfterSwitchover]).Should(BeEquivalentTo(targetPrimary))
+ }, testTimeouts[timeouts.NewPrimaryAfterSwitchover]).Should(BeEquivalentTo(targetPrimary))
})
By("waiting that the old primary become ready", func() {
@@ -147,7 +169,7 @@ func AssertSwitchoverWithHistory(
// After we finish the switchover, we should wait for the cluster to be ready
// otherwise, anyone executing this may not wait and also, the following part of the function
// may fail because the switchover hasn't properly finished yet.
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
if !isReplica {
By("confirming that the all postgres containers have *.history file after switchover", func() {
@@ -155,7 +177,7 @@ func AssertSwitchoverWithHistory(
timeout := 120
// Gather pod names
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(len(podList.Items), err).To(BeEquivalentTo(oldPodListLength))
for _, p := range podList.Items {
pods = append(pods, p.Name)
@@ -164,8 +186,9 @@ func AssertSwitchoverWithHistory(
Eventually(func() error {
count := 0
for _, pod := range pods {
- out, _, err := env.ExecCommandInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: pod,
}, nil, "sh", "-c", "ls $PGDATA/pg_wal/*.history")
@@ -197,7 +220,7 @@ func AssertCreateCluster(
namespace string,
clusterName string,
sampleFile string,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
By(fmt.Sprintf("having a %v namespace", namespace), func() {
// Creating a namespace should be quick
@@ -209,19 +232,19 @@ func AssertCreateCluster(
namespaceResource := &corev1.Namespace{}
err := env.Client.Get(env.Ctx, namespacedName, namespaceResource)
return namespaceResource.GetName(), err
- }, testTimeouts[testsUtils.NamespaceCreation]).Should(BeEquivalentTo(namespace))
+ }, testTimeouts[timeouts.NamespaceCreation]).Should(BeEquivalentTo(namespace))
})
By(fmt.Sprintf("creating a Cluster in the %v namespace", namespace), func() {
CreateResourceFromFile(namespace, sampleFile)
})
// Setting up a cluster with three pods is slow, usually 200-600s
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
}
// AssertClusterIsReady checks the cluster has as many pods as in spec, that
// none of them are going to be deleted, and that the status is Healthy
-func AssertClusterIsReady(namespace string, clusterName string, timeout int, env *testsUtils.TestingEnvironment) {
+func AssertClusterIsReady(namespace string, clusterName string, timeout int, env *environment.TestingEnvironment) {
By(fmt.Sprintf("having a Cluster %s with each instance in status ready", clusterName), func() {
// Eventually the number of ready instances should be equal to the
// amount of instances defined in the cluster and
@@ -230,13 +253,13 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env
Eventually(func(g Gomega) {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
}).Should(Succeed())
start := time.Now()
Eventually(func() (string, error) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
@@ -246,7 +269,7 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env
return fmt.Sprintf("Pod '%s' is waiting for deletion", pod.Name), nil
}
}
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.Phase, err
}
return fmt.Sprintf("Ready pod is not as expected. Spec Instances: %d, ready pods: %d \n",
@@ -254,19 +277,19 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env
utils.CountReadyPods(podList.Items)), nil
}, timeout, 2).Should(BeEquivalentTo(apiv1.PhaseHealthy),
func() string {
- cluster := testsUtils.PrintClusterResources(namespace, clusterName, env)
- nodes, _ := env.DescribeKubernetesNodes()
+ cluster := testsUtils.PrintClusterResources(env.Ctx, env.Client, namespace, clusterName)
+ kubeNodes, _ := nodes.DescribeKubernetesNodes(env.Ctx, env.Client)
return fmt.Sprintf("CLUSTER STATE\n%s\n\nK8S NODES\n%s",
- cluster, nodes)
+ cluster, kubeNodes)
},
)
if cluster.Spec.Instances != 1 {
Eventually(func(g Gomega) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred(), "cannot get cluster pod list")
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred(), "cannot find cluster primary pod")
replicaNamesList := make([]string, 0, len(podList.Items)-1)
@@ -276,8 +299,9 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env
}
}
replicaNamesString := strings.Join(replicaNamesList, ",")
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
},
@@ -297,7 +321,7 @@ func AssertClusterDefault(
namespace string,
clusterName string,
isExpectedToDefault bool,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
By("having a Cluster object populated with default values", func() {
// Eventually the number of ready instances should be equal to the
@@ -306,7 +330,7 @@ func AssertClusterDefault(
var cluster *apiv1.Cluster
Eventually(func(g Gomega) {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
g.Expect(err).ToNot(HaveOccurred())
}).Should(Succeed())
@@ -320,20 +344,20 @@ func AssertClusterDefault(
})
}
-func AssertWebhookEnabled(env *testsUtils.TestingEnvironment, mutating, validating string) {
+func AssertWebhookEnabled(env *environment.TestingEnvironment, mutating, validating string) {
By("re-setting namespace selector for all admission controllers", func() {
// Setting the namespace selector in MutatingWebhook and ValidatingWebhook
// to nil will go back to the default behaviour
- mWhc, position, err := testsUtils.GetCNPGsMutatingWebhookByName(env, mutating)
+ mWhc, position, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutating)
Expect(err).ToNot(HaveOccurred())
mWhc.Webhooks[position].NamespaceSelector = nil
- err = testsUtils.UpdateCNPGsMutatingWebhookConf(env, mWhc)
+ err = operator.UpdateMutatingWebhookConf(env.Ctx, env.Interface, mWhc)
Expect(err).ToNot(HaveOccurred())
- vWhc, position, err := testsUtils.GetCNPGsValidatingWebhookByName(env, validating)
+ vWhc, position, err := operator.GetValidatingWebhookByName(env.Ctx, env.Client, validating)
Expect(err).ToNot(HaveOccurred())
vWhc.Webhooks[position].NamespaceSelector = nil
- err = testsUtils.UpdateCNPGsValidatingWebhookConf(env, vWhc)
+ err = operator.UpdateValidatingWebhookConf(env.Ctx, env.Interface, vWhc)
Expect(err).ToNot(HaveOccurred())
})
}
@@ -346,7 +370,7 @@ func AssertUpdateSecret(
namespace string,
clusterName string,
timeout int,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
var secret corev1.Secret
@@ -367,7 +391,7 @@ func AssertUpdateSecret(
// Wait for the cluster to pick up the updated secrets version first
Eventually(func() string {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
GinkgoWriter.Printf("Error reports while retrieving cluster %v\n", err.Error())
return ""
@@ -405,19 +429,14 @@ func AssertConnection(
dbname string,
user string,
password string,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
- By(fmt.Sprintf("checking that %v service exists", service), func() {
- Eventually(func(g Gomega) {
- _, err := testsUtils.GetService(namespace, service, env)
- g.Expect(err).ToNot(HaveOccurred())
- }, RetryTimeout).Should(Succeed())
- })
-
By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() {
Eventually(func(g Gomega) {
- forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
- dbname, user, password)
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, service, dbname, user, password,
+ )
defer func() {
_ = conn.Close()
forwardConn.Close()
@@ -434,16 +453,20 @@ func AssertConnection(
}
// AssertOperatorIsReady verifies that the operator is ready
-func AssertOperatorIsReady() {
+func AssertOperatorIsReady(
+ ctx context.Context,
+ crudClient ctrlclient.Client,
+ kubeInterface kubernetes.Interface,
+) {
Eventually(func() (bool, error) {
- ready, err := env.IsOperatorReady()
+ ready, err := operator.IsReady(ctx, crudClient, kubeInterface)
if ready && err == nil {
return true, nil
}
// Waiting a bit to avoid overloading the API server
time.Sleep(1 * time.Second)
return ready, err
- }, testTimeouts[testsUtils.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready")
+ }, testTimeouts[timeouts.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready")
}
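Since AssertOperatorIsReady no longer closes over the package-level env for these dependencies, each caller passes them explicitly. Assuming the usual env variable in the suite, a call site now reads:

	AssertOperatorIsReady(env.Ctx, env.Client, env.Interface)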
type TableLocator struct {
@@ -455,18 +478,21 @@ type TableLocator struct {
}
// AssertCreateTestData creates test data on a given TableLocator
-func AssertCreateTestData(env *testsUtils.TestingEnvironment, tl TableLocator) {
+func AssertCreateTestData(env *environment.TestingEnvironment, tl TableLocator) {
if tl.DatabaseName == "" {
- tl.DatabaseName = testsUtils.AppDBName
+ tl.DatabaseName = postgres.AppDBName
}
if tl.Tablespace == "" {
- tl.Tablespace = testsUtils.TablespaceDefaultName
+ tl.Tablespace = postgres.TablespaceDefaultName
}
By(fmt.Sprintf("creating test data in table %v (cluster %v, database %v, tablespace %v)",
tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() {
- forward, conn, err := testsUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
tl.Namespace,
tl.ClusterName,
tl.DatabaseName,
@@ -492,7 +518,9 @@ func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, dat
query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS image (name text,raster oid); "+
"INSERT INTO image (name, raster) VALUES ('beautiful image', lo_from_bytea(%d, '%s'));", oid, data)
- _, err := testsUtils.RunExecOverForward(env, namespace, clusterName, testsUtils.AppDBName,
+ _, err := postgres.RunExecOverForward(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix, query)
Expect(err).ToNot(HaveOccurred())
})
@@ -506,14 +534,15 @@ func insertRecordIntoTable(tableName string, value int, conn *sql.DB) {
func QueryMatchExpectationPredicate(
pod *corev1.Pod,
- dbname testsUtils.DatabaseName,
+ dbname exec.DatabaseName,
query string,
expectedOutput string,
) func(g Gomega) {
return func(g Gomega) {
// executor
- stdout, stderr, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{Namespace: pod.Namespace, PodName: pod.Name},
+ stdout, stderr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: pod.Namespace, PodName: pod.Name},
dbname,
query,
)
@@ -536,14 +565,17 @@ func databaseExistsQuery(dbName string) string {
// AssertDataExpectedCount verifies that an expected amount of rows exists on the table
func AssertDataExpectedCount(
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
tl TableLocator,
expectedValue int,
) {
By(fmt.Sprintf("verifying test data in table %v (cluster %v, database %v, tablespace %v)",
tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() {
- row, err := testsUtils.RunQueryRowOverForward(
- env,
+ row, err := postgres.RunQueryRowOverForward(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
tl.Namespace,
tl.ClusterName,
tl.DatabaseName,
@@ -565,22 +597,23 @@ func AssertLargeObjectValue(namespace, clusterName string, oid int, data string)
query := fmt.Sprintf("SELECT encode(lo_get(%v), 'escape');", oid)
Eventually(func() (string, error) {
// We keep getting the pod, since there could be a new pod with the same name
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
query)
if err != nil {
return "", err
}
return strings.Trim(stdout, "\n"), nil
- }, testTimeouts[testsUtils.LargeObject]).Should(BeEquivalentTo(data))
+ }, testTimeouts[timeouts.LargeObject]).Should(BeEquivalentTo(data))
})
}
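The exec.QueryInInstancePod call used above is the general replacement for env.ExecQueryInInstancePod. A minimal sketch against an arbitrary instance pod (pod and query are placeholders):

	stdout, stderr, err := exec.QueryInInstancePod(
		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
		exec.PodLocator{Namespace: pod.Namespace, PodName: pod.Name},
		postgres.AppDBName,
		"SELECT 1",
	)
	Expect(err).ToNot(HaveOccurred(), "stdout: %s, stderr: %s", stdout, stderr)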
@@ -588,18 +621,19 @@ func AssertLargeObjectValue(namespace, clusterName string, oid int, data string)
func AssertClusterStandbysAreStreaming(namespace string, clusterName string, timeout int32) {
query := "SELECT count(*) FROM pg_stat_wal_receiver"
Eventually(func() error {
- standbyPods, err := env.GetClusterReplicas(namespace, clusterName)
+ standbyPods, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return err
}
for _, pod := range standbyPods.Items {
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
if err != nil {
return err
@@ -643,12 +677,13 @@ func AssertStandbysFollowPromotion(namespace string, clusterName string, timeout
if err := env.Client.Get(env.Ctx, podNamespacedName, pod); err != nil {
return "", err
}
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
query)
return strings.TrimSpace(out), err
}, timeout).Should(BeEquivalentTo("t"),
@@ -657,7 +692,7 @@ func AssertStandbysFollowPromotion(namespace string, clusterName string, timeout
})
By("having all the instances ready", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By(fmt.Sprintf("restoring full cluster functionality within %v seconds", timeout), func() {
@@ -697,11 +732,12 @@ func AssertWritesResumedBeforeTimeout(namespace string, clusterName string, time
pod := &corev1.Pod{}
err := env.Client.Get(env.Ctx, namespacedName, pod)
Expect(err).ToNot(HaveOccurred())
- out, _, err := env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
- }, testsUtils.AppDBName,
+ }, postgres.AppDBName,
query,
RetryTimeout,
PollingTime,
@@ -727,7 +763,7 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) {
var cluster *apiv1.Cluster
Eventually(func() (string, error) {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.TargetPrimary, err
}, timeout).ShouldNot(Or(BeEquivalentTo(oldPrimary), BeEquivalentTo(apiv1.PendingFailoverMarker)))
@@ -755,11 +791,12 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) {
Expect(err).ToNot(HaveOccurred())
// Expect write operation to succeed
query := "CREATE TABLE IF NOT EXISTS assert_new_primary(var1 text);"
- _, _, err = env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
- }, testsUtils.AppDBName,
+ }, postgres.AppDBName,
query,
RetryTimeout,
PollingTime,
@@ -772,7 +809,7 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) {
func CheckPointAndSwitchWalOnPrimary(namespace, clusterName string) string {
var latestWAL string
By("trigger checkpoint and switch wal on primary", func() {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
primary := pod.GetName()
latestWAL = switchWalAndGetLatestArchive(namespace, primary)
@@ -785,7 +822,7 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) {
var latestWALPath string
// Create a WAL on the primary and check if it arrives at minio, within a short time
By("archiving WALs and verifying they exist", func() {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
primary := pod.GetName()
latestWAL := switchWalAndGetLatestArchive(namespace, primary)
@@ -796,13 +833,13 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) {
Eventually(func() (int, error) {
// WALs are compressed with gzip in the fixture
return minio.CountFiles(minioEnv, latestWALPath)
- }, testTimeouts[testsUtils.WalsInMinio]).Should(BeEquivalentTo(1))
+ }, testTimeouts[timeouts.WalsInMinio]).Should(BeEquivalentTo(1))
})
}
func AssertScheduledBackupsAreScheduled(namespace string, backupYAMLPath string, timeout int) {
CreateResourceFromFile(namespace, backupYAMLPath)
- scheduledBackupName, err := env.GetResourceNameFromYAML(backupYAMLPath)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupYAMLPath)
Expect(err).NotTo(HaveOccurred())
// We expect the scheduled backup to be scheduled before a
@@ -872,15 +909,16 @@ func getScheduledBackupCompleteBackupsCount(namespace string, scheduledBackupNam
func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) {
By(fmt.Sprintf("verifying that postgres recovery mode is %v", expectedValue), func() {
Eventually(func() (string, error) {
- stdOut, stdErr, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdOut, stdErr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
"select pg_is_in_recovery();")
if err != nil {
- GinkgoWriter.Printf("stdout: %v\ntderr: %v\n", stdOut, stdErr)
+ GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", stdOut, stdErr)
}
return strings.Trim(stdOut, "\n"), err
}, 300, 10).Should(BeEquivalentTo(boolPGOutput(expectedValue)))
@@ -917,12 +955,13 @@ func AssertReplicaModeCluster(
AssertCreateTestData(env, tableLocator)
By("creating replica cluster", func() {
- replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample)
+ replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSample)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, replicaClusterName, replicaClusterSample, env)
// Get primary from replica cluster
Eventually(func() error {
- primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName)
+ primaryReplicaCluster, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ replicaClusterName)
return err
}, 30, 3).Should(Succeed())
AssertPgRecoveryMode(primaryReplicaCluster, true)
@@ -930,20 +969,24 @@ func AssertReplicaModeCluster(
By("checking data have been copied correctly in replica cluster", func() {
Eventually(func() (string, error) {
- stdOut, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdOut, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryReplicaCluster.Namespace,
PodName: primaryReplicaCluster.Name,
},
- testsUtils.DatabaseName(srcClusterDBName),
+ exec.DatabaseName(srcClusterDBName),
checkQuery)
return strings.Trim(stdOut, "\n"), err
}, 180, 10).Should(BeEquivalentTo("2"))
})
By("writing some new data to the source cluster", func() {
- forwardSource, connSource, err := testsUtils.ForwardPSQLConnection(
- env,
+ forwardSource, connSource, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
srcClusterName,
srcClusterDBName,
@@ -959,12 +1002,13 @@ func AssertReplicaModeCluster(
By("checking new data have been copied correctly in replica cluster", func() {
Eventually(func() (string, error) {
- stdOut, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdOut, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryReplicaCluster.Namespace,
PodName: primaryReplicaCluster.Name,
},
- testsUtils.DatabaseName(srcClusterDBName),
+ exec.DatabaseName(srcClusterDBName),
checkQuery)
return strings.Trim(stdOut, "\n"), err
}, 180, 15).Should(BeEquivalentTo("3"))
@@ -974,9 +1018,9 @@ func AssertReplicaModeCluster(
// verify the replica database created followed the source database, rather than
// default to the "app" db and user
By("checking that in replica cluster there is no database app and user app", func() {
- Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName,
databaseExistsQuery("app"), "f"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName,
roleExistsQuery("app"), "f"), 30).Should(Succeed())
})
}
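QueryMatchExpectationPredicate returns a func(g Gomega), so it composes directly with Eventually, as the checks above show. For instance, to assert the "app" database is absent:

	Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName,
		databaseExistsQuery("app"), "f"), 30).Should(Succeed())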
@@ -1001,7 +1045,9 @@ func AssertDetachReplicaModeCluster(
var referenceTime time.Time
By("taking the reference time before the detaching", func() {
Eventually(func(g Gomega) {
- referenceCondition, err := testsUtils.GetConditionsInClusterStatus(namespace, replicaClusterName, env,
+ referenceCondition, err := backups.GetConditionsInClusterStatus(
+ env.Ctx, env.Client,
+ namespace, replicaClusterName,
apiv1.ConditionClusterReady)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(referenceCondition.Status).To(BeEquivalentTo(corev1.ConditionTrue))
@@ -1012,7 +1058,7 @@ func AssertDetachReplicaModeCluster(
By("disabling the replica mode", func() {
Eventually(func(g Gomega) {
- _, _, err := testsUtils.RunUnchecked(fmt.Sprintf(
+ _, _, err := run.Unchecked(fmt.Sprintf(
"kubectl patch cluster %v -n %v -p '{\"spec\":{\"replica\":{\"enabled\":false}}}'"+
" --type='merge'",
replicaClusterName, namespace))
@@ -1022,16 +1068,18 @@ func AssertDetachReplicaModeCluster(
By("ensuring the replica cluster got promoted and restarted", func() {
Eventually(func(g Gomega) {
- cluster, err := env.GetCluster(namespace, replicaClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, replicaClusterName)
g.Expect(err).ToNot(HaveOccurred())
- condition, err := testsUtils.GetConditionsInClusterStatus(namespace, cluster.Name, env,
+ condition, err := backups.GetConditionsInClusterStatus(
+ env.Ctx, env.Client,
+ namespace, cluster.Name,
apiv1.ConditionClusterReady)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(condition).ToNot(BeNil())
g.Expect(condition.Status).To(BeEquivalentTo(corev1.ConditionTrue))
g.Expect(condition.LastTransitionTime.Time).To(BeTemporally(">", referenceTime))
}).WithTimeout(60 * time.Second).Should(Succeed())
- AssertClusterIsReady(namespace, replicaClusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, replicaClusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("verifying write operation on the replica cluster primary pod", func() {
@@ -1041,13 +1089,15 @@ func AssertDetachReplicaModeCluster(
var err error
// Get primary from replica cluster
- primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName)
+ primaryReplicaCluster, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ replicaClusterName)
g.Expect(err).ToNot(HaveOccurred())
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryReplicaCluster.Namespace,
PodName: primaryReplicaCluster.Name,
- }, testsUtils.DatabaseName(srcDatabaseName),
+ }, exec.DatabaseName(srcDatabaseName),
query,
)
g.Expect(err).ToNot(HaveOccurred())
@@ -1057,9 +1107,9 @@ func AssertDetachReplicaModeCluster(
By("verifying the replica database doesn't exist in the replica cluster", func() {
// Application database configuration is skipped for replica clusters,
// so we expect these to not be present
- Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName,
databaseExistsQuery(replicaDatabaseName), "f"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName,
roleExistsQuery(replicaUserName), "f"), 30).Should(Succeed())
})
@@ -1074,11 +1124,12 @@ func AssertDetachReplicaModeCluster(
})
By("verifying that replica cluster was not modified", func() {
- outTables, stdErr, err := env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ outTables, stdErr, err := exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryReplicaCluster.Namespace,
PodName: primaryReplicaCluster.Name,
- }, testsUtils.DatabaseName(srcDatabaseName),
+ }, exec.DatabaseName(srcDatabaseName),
"\\dt",
RetryTimeout,
PollingTime,
@@ -1091,10 +1142,14 @@ func AssertDetachReplicaModeCluster(
})
}
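Both write-check helpers below rely on the same forwarding primitive. A sketch of the connect-and-cleanup pattern they share (namespace, service, and credentials are placeholders; the deferred close before the error check mirrors the code in this patch):

	forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
		env.Ctx, env.Interface, env.RestClientConfig,
		namespace, service, appDBName, appDBUser, appDBPass,
	)
	defer func() {
		_ = conn.Close()
		forwardConn.Close()
	}()
	Expect(err).ToNot(HaveOccurred())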
-func AssertWritesToReplicaFails(namespace, service, appDBName, appDBUser, appDBPass string) {
+func AssertWritesToReplicaFails(
+ namespace, service, appDBName, appDBUser, appDBPass string,
+) {
By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() {
- forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
- appDBName, appDBUser, appDBPass)
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, service, appDBName, appDBUser, appDBPass,
+ )
defer func() {
_ = conn.Close()
forwardConn.Close()
@@ -1118,8 +1173,10 @@ func AssertWritesToReplicaFails(namespace, service, appDBName, appDBUser, appDBP
func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) {
By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() {
- forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service,
- appDBName, appDBUser, appDBPass)
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, service, appDBName, appDBUser, appDBPass,
+ )
defer func() {
_ = conn.Close()
forwardConn.Close()
@@ -1170,7 +1227,7 @@ func AssertFastFailOver(
})
By("having a Cluster with three instances ready", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
// Node 1 should be the primary, so the -rw service should
@@ -1208,7 +1265,9 @@ func AssertFastFailOver(
", PRIMARY KEY (id)" +
")"
- _, err = testsUtils.RunExecOverForward(env, namespace, clusterName, testsUtils.AppDBName,
+ _, err = postgres.RunExecOverForward(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix, query)
Expect(err).ToNot(HaveOccurred())
})
@@ -1219,11 +1278,11 @@ func AssertFastFailOver(
// on the postgres primary. We make sure that the first
// records appear on the database before moving to the next
// step.
- _, _, err = testsUtils.Run("kubectl create -n " + namespace +
+ _, _, err = run.Run("kubectl create -n " + namespace +
" -f " + webTestFile)
Expect(err).ToNot(HaveOccurred())
- _, _, err = testsUtils.Run("kubectl create -n " + namespace +
+ _, _, err = run.Run("kubectl create -n " + namespace +
" -f " + webTestJob)
Expect(err).ToNot(HaveOccurred())
@@ -1239,12 +1298,13 @@ func AssertFastFailOver(
if err = env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod); err != nil {
return "", err
}
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
query)
return strings.TrimSpace(out), err
}, RetryTimeout).Should(BeEquivalentTo("t"))
@@ -1256,7 +1316,7 @@ func AssertFastFailOver(
GracePeriodSeconds: &quickDeletionPeriod,
}
lm := clusterName + "-1"
- err = env.DeletePod(namespace, lm, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, lm, quickDelete)
Expect(err).ToNot(HaveOccurred())
})
@@ -1268,7 +1328,7 @@ func AssertFastFailOver(
func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsCount, secretsCount int) {
By("verifying the custom metrics ConfigMaps and Secrets exist", func() {
// Create the ConfigMaps and a Secret
- _, _, err := testsUtils.Run("kubectl apply -n " + namespace + " -f " + sampleFile)
+ _, _, err := run.Run("kubectl apply -n " + namespace + " -f " + sampleFile)
Expect(err).ToNot(HaveOccurred())
// Check configmaps exist
@@ -1295,7 +1355,7 @@ func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsC
}
func AssertCreationOfTestDataForTargetDB(
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
namespace,
clusterName,
targetDBName,
@@ -1303,26 +1363,33 @@ func AssertCreationOfTestDataForTargetDB(
) {
By(fmt.Sprintf("creating target database '%v' and table '%v'", targetDBName, tableName), func() {
// We need to gather the cluster primary to create the database via superuser
- currentPrimary, err := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- appUser, _, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
+ appUser, _, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ clusterName, namespace, apiv1.ApplicationUserSecretSuffix,
+ )
Expect(err).ToNot(HaveOccurred())
// Create database
createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER %v", targetDBName, appUser)
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: currentPrimary.Namespace,
PodName: currentPrimary.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
createDBQuery)
Expect(err).ToNot(HaveOccurred())
// Open a connection to the newly created database
- forward, conn, err := testsUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
targetDBName,
@@ -1370,7 +1437,7 @@ func AssertApplicationDatabaseConnection(
Expect(err).ToNot(HaveOccurred())
appPassword = string(appSecret.Data["password"])
}
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
AssertConnection(namespace, rwService, appDB, appUser, appPassword, env)
})
@@ -1378,11 +1445,11 @@ func AssertApplicationDatabaseConnection(
func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, cluster *apiv1.Cluster) {
By("collect and verify metric being exposed with target databases", func() {
- podList, err := env.GetClusterPodList(namespace, cluster.Name)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, cluster.Name)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
podName := pod.GetName()
- out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled())
+ out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, cluster.IsMetricsTLSEnabled())
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out,
fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(),
@@ -1407,7 +1474,10 @@ func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, clu
func CreateAndAssertServerCertificatesSecrets(
namespace, clusterName, caSecName, tlsSecName string, includeCAPrivateKey bool,
) {
- cluster, caPair, err := testsUtils.CreateSecretCA(namespace, clusterName, caSecName, includeCAPrivateKey, env)
+ cluster, caPair, err := secrets.CreateSecretCA(
+ env.Ctx, env.Client,
+ namespace, clusterName, caSecName, includeCAPrivateKey,
+ )
Expect(err).ToNot(HaveOccurred())
serverPair, err := caPair.CreateAndSignPair(cluster.GetServiceReadWriteName(), certs.CertTypeServer,
@@ -1422,7 +1492,9 @@ func CreateAndAssertServerCertificatesSecrets(
func CreateAndAssertClientCertificatesSecrets(
namespace, clusterName, caSecName, tlsSecName, userSecName string, includeCAPrivateKey bool,
) {
- _, caPair, err := testsUtils.CreateSecretCA(namespace, clusterName, caSecName, includeCAPrivateKey, env)
+ _, caPair, err := secrets.CreateSecretCA(
+ env.Ctx, env.Client,
+ namespace, clusterName, caSecName, includeCAPrivateKey)
Expect(err).ToNot(HaveOccurred())
// Sign tls certificates for streaming_replica user
@@ -1452,7 +1524,9 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str
"sslrootcert=/etc/secrets/ca/ca.crt "+
"dbname=app user=app sslmode=verify-full", clusterName, namespace)
timeout := time.Second * 10
- stdout, stderr, err := env.ExecCommand(env.Ctx, appPod, appPod.Spec.Containers[0].Name, &timeout,
+ stdout, stderr, err := exec.Command(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ appPod, appPod.Spec.Containers[0].Name, &timeout,
"psql", dsn, "-tAc", "SELECT 1")
return stdout, stderr, err
}, 360).Should(BeEquivalentTo("1\n"))
@@ -1461,34 +1535,38 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str
func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableName string) {
By("Async Replication into external cluster", func() {
- restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile)
+ restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile)
Expect(err).ToNot(HaveOccurred())
// Add additional data to the source cluster
- sourceClusterName, err := env.GetResourceNameFromYAML(sourceClusterFile)
+ sourceClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterFile)
Expect(err).ToNot(HaveOccurred())
CreateResourceFromFile(namespace, restoreClusterFile)
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
// Test data should be present on restored primary
- restoredPrimary, err := env.GetClusterPrimary(namespace, restoredClusterName)
+ restoredPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, restoredClusterName)
Expect(err).ToNot(HaveOccurred())
// We need the credentials from the source cluster because the replica cluster
// doesn't create the credentials on its own namespace
- appUser, appUserPass, err := testsUtils.GetCredentials(
+ appUser, appUserPass, err := secrets.GetCredentials(
+ env.Ctx,
+ env.Client,
sourceClusterName,
namespace,
apiv1.ApplicationUserSecretSuffix,
- env,
)
Expect(err).ToNot(HaveOccurred())
- forwardRestored, connRestored, err := testsUtils.ForwardPSQLConnectionWithCreds(
- env,
+ forwardRestored, connRestored, err := postgres.ForwardPSQLConnectionWithCreds(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
restoredClusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
appUser,
appUserPass,
)
@@ -1504,11 +1582,14 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile,
Expect(err).ToNot(HaveOccurred())
Expect(countString).To(BeEquivalentTo("2"))
- forwardSource, connSource, err := testsUtils.ForwardPSQLConnection(
- env,
+ forwardSource, connSource, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
sourceClusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -1523,46 +1604,52 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile,
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: sourceClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 3)
- cluster, err := env.GetCluster(namespace, restoredClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName)
Expect(err).ToNot(HaveOccurred())
expectedReplicas := cluster.Spec.Instances - 1
// Cascading replicas should be attached to primary replica
- connectedReplicas, err := testsUtils.CountReplicas(env, restoredPrimary)
+ connectedReplicas, err := postgres.CountReplicas(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ restoredPrimary, RetryTimeout,
+ )
Expect(connectedReplicas, err).To(BeEquivalentTo(expectedReplicas))
})
}
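TableLocator is the common currency for the data helpers in this file. A minimal sketch combining it with AssertCreateTestData and AssertDataExpectedCount (the table name is hypothetical; DatabaseName and Tablespace fall back to the postgres package defaults when left empty):

	tl := TableLocator{
		Namespace:    namespace,
		ClusterName:  clusterName,
		DatabaseName: postgres.AppDBName,
		TableName:    "sample_table",
	}
	AssertCreateTestData(env, tl)       // inserts the fixture rows
	AssertDataExpectedCount(env, tl, 2) // verifies the row count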
func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableName string) {
- restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile)
+ restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile)
Expect(err).ToNot(HaveOccurred())
By("Restoring a backup in a new cluster", func() {
CreateResourceFromFile(namespace, restoreClusterFile)
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
// Test data should be present on restored primary
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: restoredClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
})
By("Ensuring the restored cluster is on timeline 2", func() {
- row, err := testsUtils.RunQueryRowOverForward(
- env,
+ row, err := postgres.RunQueryRowOverForward(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
restoredClusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
"SELECT substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)",
)
@@ -1578,8 +1665,10 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140)
// Gather Credentials
- appUser, appUserPass, err := testsUtils.GetCredentials(restoredClusterName, namespace,
- apiv1.ApplicationUserSecretSuffix, env)
+ appUser, appUserPass, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ restoredClusterName, namespace,
+ apiv1.ApplicationUserSecretSuffix)
Expect(err).ToNot(HaveOccurred())
secretName := restoredClusterName + apiv1.ApplicationUserSecretSuffix
@@ -1588,9 +1677,10 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
namespace,
restoredClusterName,
appUser,
- testsUtils.AppDBName,
+ postgres.AppDBName,
appUserPass,
- secretName)
+ secretName,
+ )
})
By("update user application password for restored cluster and verify connectivity", func() {
@@ -1601,39 +1691,41 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
namespace,
restoredClusterName,
appUser,
- testsUtils.AppDBName,
+ postgres.AppDBName,
newPassword,
- secretName)
+ secretName,
+ )
})
}
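The credentials lookup used in these restore assertions follows the new explicit-context signature. A sketch of fetching the app user credentials and reusing them for a connectivity check, with the secret suffix as in the code above:

	appUser, appUserPass, err := secrets.GetCredentials(
		env.Ctx, env.Client,
		clusterName, namespace, apiv1.ApplicationUserSecretSuffix,
	)
	Expect(err).ToNot(HaveOccurred())
	secretName := clusterName + apiv1.ApplicationUserSecretSuffix
	AssertApplicationDatabaseConnection(namespace, clusterName, appUser,
		postgres.AppDBName, appUserPass, secretName)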
func AssertClusterRestore(namespace, restoreClusterFile, tableName string) {
- restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile)
+ restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile)
Expect(err).ToNot(HaveOccurred())
By("Restoring a backup in a new cluster", func() {
CreateResourceFromFile(namespace, restoreClusterFile)
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
// Test data should be present on restored primary
primary := restoredClusterName + "-1"
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: restoredClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
// Restored primary should be on timeline 2
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
"select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)")
Expect(strings.Trim(out, "\n"), err).To(Equal("00000002"))
@@ -1648,12 +1740,12 @@ func AssertClusterImport(namespace, clusterWithExternalClusterName, clusterName,
var cluster *apiv1.Cluster
By("Importing Database in a new cluster", func() {
var err error
- cluster, err = testsUtils.ImportDatabaseMicroservice(namespace, clusterName,
- clusterWithExternalClusterName, "", databaseName, env)
+ cluster, err = importdb.ImportDatabaseMicroservice(env.Ctx, env.Client, namespace, clusterName,
+ clusterWithExternalClusterName, "", databaseName)
Expect(err).ToNot(HaveOccurred())
// We give more time than the usual 600s, since the recovery is slower
AssertClusterIsReady(namespace, clusterWithExternalClusterName,
- testTimeouts[testsUtils.ClusterIsReadySlow], env)
+ testTimeouts[timeouts.ClusterIsReadySlow], env)
// Restored standby should be attached to restored primary
AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 140)
})
@@ -1698,7 +1790,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) {
Eventually(func() error {
cmd := fmt.Sprintf("kubectl patch ScheduledBackup %v -n %v -p '{\"spec\":{\"suspend\":true}}' "+
"--type='merge'", scheduledBackupName, namespace)
- _, _, err = testsUtils.RunUnchecked(cmd)
+ _, _, err = run.Unchecked(cmd)
if err != nil {
return err
}
@@ -1746,7 +1838,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) {
Eventually(func() error {
cmd := fmt.Sprintf("kubectl patch ScheduledBackup %v -n %v -p '{\"spec\":{\"suspend\":false}}' "+
"--type='merge'", scheduledBackupName, namespace)
- _, _, err = testsUtils.RunUnchecked(cmd)
+ _, _, err = run.Unchecked(cmd)
if err != nil {
return err
}
@@ -1775,20 +1867,23 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) {
func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, tableName, lsn string) {
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
// Gather the recovered cluster primary
- primaryInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
secretName := clusterName + apiv1.ApplicationUserSecretSuffix
By("Ensuring the restored cluster is on timeline 3", func() {
// Restored primary should be on timeline 3
- row, err := testsUtils.RunQueryRowOverForward(
- env,
+ row, err := postgres.RunQueryRowOverForward(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
"select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)",
)
@@ -1800,7 +1895,9 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
Expect(currentWalLsn).To(Equal(lsn))
// Restored standby should be attached to restored primary
- Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2))
+ Expect(postgres.CountReplicas(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ primaryInfo, RetryTimeout)).To(BeEquivalentTo(2))
})
By(fmt.Sprintf("after restored, 3rd entry should not be exists in table '%v'", tableName), func() {
@@ -1808,15 +1905,16 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
})
// Gather credentials
- appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix,
- env)
+ appUser, appUserPass, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ clusterName, namespace, apiv1.ApplicationUserSecretSuffix)
Expect(err).ToNot(HaveOccurred())
By("checking the restored cluster with auto generated app password connectable", func() {
@@ -1824,9 +1922,10 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
namespace,
clusterName,
appUser,
- testsUtils.AppDBName,
+ postgres.AppDBName,
appUserPass,
- secretName)
+ secretName,
+ )
})
By("update user application password for restored cluster and verify connectivity", func() {
@@ -1836,25 +1935,29 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
namespace,
clusterName,
appUser,
- testsUtils.AppDBName,
+ postgres.AppDBName,
newPassword,
- secretName)
+ secretName,
+ )
})
}
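postgres.CountReplicas now also takes the retry timeout explicitly. A sketch of asserting the expected number of attached standbys (the replica count here is hypothetical):

	primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
	Expect(err).ToNot(HaveOccurred())
	Expect(postgres.CountReplicas(
		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
		primaryPod, RetryTimeout,
	)).To(BeEquivalentTo(2))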
func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn string) {
By("restoring a backup cluster with PITR in a new cluster", func() {
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)
- primaryInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
+ primaryInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Restored primary should be on timeline 3
- row, err := testsUtils.RunQueryRowOverForward(
- env,
+ row, err := postgres.RunQueryRowOverForward(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
"select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)",
)
@@ -1866,7 +1969,9 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str
Expect(currentWalLsn).To(Equal(lsn))
// Restored standby should be attached to restored primary
- Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2))
+ Expect(postgres.CountReplicas(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ primaryInfo, RetryTimeout)).To(BeEquivalentTo(2))
})
By(fmt.Sprintf("after restored, 3rd entry should not be exists in table '%v'", tableName), func() {
@@ -1874,7 +1979,7 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
@@ -1883,7 +1988,7 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str
func AssertArchiveConditionMet(namespace, clusterName, timeout string) {
By("Waiting for the condition", func() {
- out, _, err := testsUtils.Run(fmt.Sprintf(
+ out, _, err := run.Run(fmt.Sprintf(
"kubectl -n %s wait --for=condition=ContinuousArchiving=true cluster/%s --timeout=%s",
namespace, clusterName, timeout))
Expect(err).ToNot(HaveOccurred())
@@ -1894,21 +1999,23 @@ func AssertArchiveConditionMet(namespace, clusterName, timeout string) {
// switchWalAndGetLatestArchive triggers a new WAL and returns the name of the latest archived WAL file
func switchWalAndGetLatestArchive(namespace, podName string) string {
- _, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: podName,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
"CHECKPOINT;")
Expect(err).ToNot(HaveOccurred())
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: podName,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
"SELECT pg_walfile_name(pg_switch_wal());")
Expect(err).ToNot(HaveOccurred())
@@ -1918,7 +2025,7 @@ func switchWalAndGetLatestArchive(namespace, podName string) string {
func createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerYamlFilePath string, expectedInstanceCount int) {
CreateResourceFromFile(namespace, poolerYamlFilePath)
Eventually(func() (int32, error) {
- poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
// Wait for the deployment to be ready
deployment := &appsv1.Deployment{}
@@ -1937,7 +2044,7 @@ func assertPgBouncerPoolerDeploymentStrategy(
) {
By("verify pooler deployment has expected rolling update configuration", func() {
Eventually(func() bool {
- poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
// Wait for the deployment to be ready
deployment := &appsv1.Deployment{}
@@ -1957,7 +2064,7 @@ func assertPgBouncerPoolerDeploymentStrategy(
// assertPGBouncerPodsAreReady verifies if PGBouncer pooler pods are ready
func assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath string, expectedPodCount int) {
Eventually(func() (bool, error) {
- poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
@@ -1995,12 +2102,14 @@ func assertReadWriteConnectionUsingPgBouncerService(
poolerYamlFilePath string,
isPoolerRW bool,
) {
- poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerService, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
- appUser, generatedAppUserPassword, err := testsUtils.GetCredentials(
- clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
+
+ appUser, generatedAppUserPassword, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ clusterName, namespace, apiv1.ApplicationUserSecretSuffix)
Expect(err).ToNot(HaveOccurred())
- AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env)
+ AssertConnection(namespace, poolerService, postgres.AppDBName, appUser, generatedAppUserPassword, env)
// verify that, if pooler type setup read write then it will allow both read and
// write operations or if pooler type setup read only then it will allow only read operations
@@ -2015,7 +2124,7 @@ func assertReadWriteConnectionUsingPgBouncerService(
func assertPodIsRecreated(namespace, poolerSampleFile string) {
var podNameBeforeDelete string
- poolerName, err := env.GetResourceNameFromYAML(poolerSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerSampleFile)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("deleting pooler '%s' pod", poolerName), func() {
@@ -2029,7 +2138,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) {
// deleting pgbouncer pod
cmd := fmt.Sprintf("kubectl delete pod %s -n %s", podNameBeforeDelete, namespace)
- _, _, err = testsUtils.Run(cmd)
+ _, _, err = run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("verifying pooler '%s' pod has been recreated", poolerName), func() {
@@ -2056,7 +2165,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) {
func assertDeploymentIsRecreated(namespace, poolerSampleFile string) {
var deploymentUID types.UID
- poolerName, err := env.GetResourceNameFromYAML(poolerSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerSampleFile)
Expect(err).ToNot(HaveOccurred())
deploymentNamespacedName := types.NamespacedName{
@@ -2068,7 +2177,7 @@ func assertDeploymentIsRecreated(namespace, poolerSampleFile string) {
err := env.Client.Get(env.Ctx, deploymentNamespacedName, deployment)
g.Expect(err).ToNot(HaveOccurred())
}).Should(Succeed())
- err = testsUtils.DeploymentWaitForReady(env, deployment, 60)
+ err = deployments.WaitForReady(env.Ctx, env.Client, deployment, 60)
Expect(err).ToNot(HaveOccurred())
deploymentName := deployment.GetName()
@@ -2097,7 +2206,7 @@ func assertDeploymentIsRecreated(namespace, poolerSampleFile string) {
}, 300).ShouldNot(BeEquivalentTo(deploymentUID))
})
By(fmt.Sprintf("new '%s' deployment has new pods ready", deploymentName), func() {
- err := testsUtils.DeploymentWaitForReady(env, deployment, 120)
+ err := deployments.WaitForReady(env.Ctx, env.Client, deployment, 120)
Expect(err).ToNot(HaveOccurred())
})
By("verifying UIDs of pods have changed", func() {
@@ -2124,7 +2233,7 @@ func assertPGBouncerEndpointsContainsPodsIP(
) {
var pgBouncerPods []*corev1.Pod
endpoint := &corev1.Endpoints{}
- endpointName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ endpointName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
Eventually(func(g Gomega) {
@@ -2132,7 +2241,7 @@ func assertPGBouncerEndpointsContainsPodsIP(
g.Expect(err).ToNot(HaveOccurred())
}).Should(Succeed())
- poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
@@ -2157,7 +2266,7 @@ func assertPGBouncerHasServiceNameInsideHostParameter(namespace, serviceName str
for _, pod := range podList.Items {
command := fmt.Sprintf("kubectl exec -n %s %s -- /bin/bash -c 'grep "+
" \"host=%s\" controller/configs/pgbouncer.ini'", namespace, pod.Name, serviceName)
- out, _, err := testsUtils.Run(command)
+ out, _, err := run.Run(command)
Expect(err).ToNot(HaveOccurred())
expectedContainedHost := fmt.Sprintf("host=%s", serviceName)
Expect(out).To(ContainSubstring(expectedContainedHost))
@@ -2166,7 +2275,10 @@ func assertPGBouncerHasServiceNameInsideHostParameter(namespace, serviceName str
// OnlineResizePVC is for verifying if storage can be automatically expanded, or not
func OnlineResizePVC(namespace, clusterName string) {
- walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env)
+ walStorageEnabled, err := storage.IsWalStorageEnabled(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ )
Expect(err).ToNot(HaveOccurred())
pvc := &corev1.PersistentVolumeClaimList{}
@@ -2192,7 +2304,7 @@ func OnlineResizePVC(namespace, clusterName string) {
namespace,
s)
Eventually(func() error {
- _, _, err := testsUtils.RunUnchecked(cmd)
+ _, _, err := run.Unchecked(cmd)
return err
}, 60, 5).Should(Succeed())
}
@@ -2222,7 +2334,10 @@ func OnlineResizePVC(namespace, clusterName string) {
}
func OfflineResizePVC(namespace, clusterName string, timeout int) {
- walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env)
+ walStorageEnabled, err := storage.IsWalStorageEnabled(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ )
Expect(err).ToNot(HaveOccurred())
By("verify PVC size before expansion", func() {
@@ -2248,64 +2363,65 @@ func OfflineResizePVC(namespace, clusterName string, timeout int) {
namespace,
s)
Eventually(func() error {
- _, _, err := testsUtils.RunUnchecked(cmd)
+ _, _, err := run.Unchecked(cmd)
return err
}, 60, 5).Should(Succeed())
}
})
By("deleting Pod and PVCs, first replicas then the primary", func() {
// Gathering cluster primary
- currentPrimary, err := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
currentPrimaryWalStorageName := currentPrimary.Name + "-wal"
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
Expect(len(podList.Items), err).To(BeEquivalentTo(3))
// Iterating through PVC list for deleting pod and PVC for storage expansion
- for _, pod := range podList.Items {
+ for _, p := range podList.Items {
// Delete only the non-primary pods here, to keep the cluster healthy.
// The primary will eventually be deleted afterwards
- if !specs.IsPodPrimary(pod) {
+ if !specs.IsPodPrimary(p) {
// Deleting PVC
- _, _, err = testsUtils.Run(
- "kubectl delete pvc " + pod.Name + " -n " + namespace + " --wait=false")
+ _, _, err = run.Run(
+ "kubectl delete pvc " + p.Name + " -n " + namespace + " --wait=false")
Expect(err).ToNot(HaveOccurred())
// Deleting WalStorage PVC if needed
if walStorageEnabled {
- _, _, err = testsUtils.Run(
- "kubectl delete pvc " + pod.Name + "-wal" + " -n " + namespace + " --wait=false")
+ _, _, err = run.Run(
+ "kubectl delete pvc " + p.Name + "-wal" + " -n " + namespace + " --wait=false")
Expect(err).ToNot(HaveOccurred())
}
// Deleting standby and replica pods
- err = env.DeletePod(namespace, pod.Name, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, p.Name, quickDelete)
Expect(err).ToNot(HaveOccurred())
}
}
AssertClusterIsReady(namespace, clusterName, timeout, env)
// Deleting primary pvc
- _, _, err = testsUtils.Run(
+ _, _, err = run.Run(
"kubectl delete pvc " + currentPrimary.Name + " -n " + namespace + " --wait=false")
Expect(err).ToNot(HaveOccurred())
// Deleting Primary WalStorage PVC if needed
if walStorageEnabled {
- _, _, err = testsUtils.Run(
+ _, _, err = run.Run(
"kubectl delete pvc " + currentPrimaryWalStorageName + " -n " + namespace + " --wait=false")
Expect(err).ToNot(HaveOccurred())
}
// Deleting primary pod
- err = env.DeletePod(namespace, currentPrimary.Name, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary.Name, quickDelete)
Expect(err).ToNot(HaveOccurred())
})
AssertClusterIsReady(namespace, clusterName, timeout, env)
By("verifying Cluster storage is expanded", func() {
// Gathering PVC list for comparison
- pvcList, err := env.GetPVCList(namespace)
+ pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
// Gathering PVC size and comparing with expanded value
expectedCount := 3
@@ -2332,19 +2448,22 @@ func DeleteTableUsingPgBouncerService(
namespace,
clusterName,
poolerYamlFilePath string,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
pod *corev1.Pod,
) {
- poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath)
+ poolerService, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath)
Expect(err).ToNot(HaveOccurred())
- appUser, generatedAppUserPassword, err := testsUtils.GetCredentials(
- clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
+
+ appUser, generatedAppUserPassword, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ clusterName, namespace, apiv1.ApplicationUserSecretSuffix,
+ )
Expect(err).ToNot(HaveOccurred())
- AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env)
+ AssertConnection(namespace, poolerService, postgres.AppDBName, appUser, generatedAppUserPassword, env)
connectionTimeout := time.Second * 10
- dsn := testsUtils.CreateDSN(poolerService, appUser, testsUtils.AppDBName, generatedAppUserPassword,
- testsUtils.Require, 5432)
+ dsn := services.CreateDSN(poolerService, appUser, postgres.AppDBName, generatedAppUserPassword,
+ services.Require, 5432)
_, _, err = env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &connectionTimeout,
"psql", dsn, "-tAc", "DROP TABLE table1")
Expect(err).ToNot(HaveOccurred())
@@ -2372,11 +2491,11 @@ func collectAndAssertDefaultMetricsPresentOnEachPod(
)
}
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
podName := pod.GetName()
- out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, tlsEnabled)
+ out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, tlsEnabled)
Expect(err).ToNot(HaveOccurred())
// error should be zero on each pod metrics
@@ -2428,11 +2547,11 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) {
)
}
By("collecting and verify set of collector metrics on each pod", func() {
- podList, err := env.GetClusterPodList(cluster.Namespace, cluster.Name)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, cluster.Namespace, cluster.Name)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
podName := pod.GetName()
- out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled())
+ out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, cluster.IsMetricsTLSEnabled())
Expect(err).ToNot(HaveOccurred())
// error should be zero on each pod metrics
@@ -2452,17 +2571,17 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) {
// YAML sample file and returns any errors
func CreateResourcesFromFileWithError(namespace, sampleFilePath string) error {
wrapErr := func(err error) error { return fmt.Errorf("on CreateResourcesFromFileWithError: %w", err) }
- yaml, err := GetYAMLContent(sampleFilePath)
+ yamlContent, err := GetYAMLContent(sampleFilePath)
if err != nil {
return wrapErr(err)
}
- objects, err := testsUtils.ParseObjectsFromYAML(yaml, namespace)
+ objects, err := yaml.ParseObjectsFromYAML(yamlContent, namespace)
if err != nil {
return wrapErr(err)
}
for _, obj := range objects {
- _, err := testsUtils.CreateObject(env, obj)
+ _, err := objectsutils.Create(env.Ctx, env.Client, obj)
if err != nil {
return wrapErr(err)
}
@@ -2488,7 +2607,7 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) {
if err != nil {
return nil, wrapErr(err)
}
- yaml := data
+ yamlContent := data
if filepath.Ext(cleanPath) == ".template" {
preRollingUpdateImg := os.Getenv("E2E_PRE_ROLLING_UPDATE_IMG")
@@ -2508,12 +2627,12 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) {
envVars["SERVER_NAME"] = serverName
}
- yaml, err = testsUtils.Envsubst(envVars, data)
+ yamlContent, err = envsubst.Envsubst(envVars, data)
if err != nil {
return nil, wrapErr(err)
}
}
- return yaml, nil
+ return yamlContent, nil
}
func buildTemplateEnvs(additionalEnvs map[string]string) map[string]string {
@@ -2537,17 +2656,17 @@ func buildTemplateEnvs(additionalEnvs map[string]string) map[string]string {
// DeleteResourcesFromFile deletes the Kubernetes objects described in the file
func DeleteResourcesFromFile(namespace, sampleFilePath string) error {
wrapErr := func(err error) error { return fmt.Errorf("in DeleteResourcesFromFile: %w", err) }
- yaml, err := GetYAMLContent(sampleFilePath)
+ yamlContent, err := GetYAMLContent(sampleFilePath)
if err != nil {
return wrapErr(err)
}
- objects, err := testsUtils.ParseObjectsFromYAML(yaml, namespace)
+ objects, err := yaml.ParseObjectsFromYAML(yamlContent, namespace)
if err != nil {
return wrapErr(err)
}
for _, obj := range objects {
- err := testsUtils.DeleteObject(env, obj)
+ err := objectsutils.Delete(env.Ctx, env.Client, obj)
if err != nil {
return wrapErr(err)
}
@@ -2558,19 +2677,20 @@ func DeleteResourcesFromFile(namespace, sampleFilePath string) error {
// AssertPostgresNoPendingRestart asserts that, in the given cluster, no PostgreSQL instance has a pending restart
func AssertPostgresNoPendingRestart(namespace, clusterName string, timeout int) {
By("waiting for all pods have no pending restart", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
query := "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)"
// Check that the new parameter has been modified in every pod
Eventually(func() (bool, error) {
noPendingRestart := true
for _, pod := range podList.Items {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
if err != nil {
return false, nil
@@ -2596,8 +2716,9 @@ func AssertBackupConditionTimestampChangedInClusterStatus(
) {
By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() {
Eventually(func() (bool, error) {
- getBackupCondition, err := testsUtils.GetConditionsInClusterStatus(
- namespace, clusterName, env, clusterConditionType)
+ getBackupCondition, err := backups.GetConditionsInClusterStatus(
+ env.Ctx, env.Client,
+ namespace, clusterName, clusterConditionType)
if err != nil {
return false, err
}
@@ -2611,12 +2732,13 @@ func AssertClusterReadinessStatusIsReached(
clusterName string,
conditionStatus apiv1.ConditionStatus,
timeout int,
- env *testsUtils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
By(fmt.Sprintf("waiting for cluster condition status in cluster '%v'", clusterName), func() {
Eventually(func() (string, error) {
- clusterCondition, err := testsUtils.GetConditionsInClusterStatus(
- namespace, clusterName, env, apiv1.ConditionClusterReady)
+ clusterCondition, err := backups.GetConditionsInClusterStatus(
+ env.Ctx, env.Client,
+ namespace, clusterName, apiv1.ConditionClusterReady)
if err != nil {
return "", err
}
@@ -2635,7 +2757,7 @@ func AssertPvcHasLabels(
By("checking PVC have the correct role labels", func() {
Eventually(func(g Gomega) {
// Gather the list of PVCs in the current namespace
- pvcList, err := env.GetPVCList(namespace)
+ pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace)
g.Expect(err).ToNot(HaveOccurred())
// Iterating through PVC list
@@ -2663,7 +2785,7 @@ func AssertPvcHasLabels(
utils.PvcRoleLabelName: ExpectedPvcRole,
utils.ClusterInstanceRoleLabelName: ExpectedRole,
}
- g.Expect(testsUtils.PvcHasLabels(pvc, expectedLabels)).To(BeTrue(),
+ g.Expect(storage.PvcHasLabels(pvc, expectedLabels)).To(BeTrue(),
fmt.Sprintf("expectedLabels: %v and found actualLabels on pvc: %v",
expectedLabels, pod.GetLabels()))
}
@@ -2684,11 +2806,15 @@ func AssertReplicationSlotsOnPod(
) {
GinkgoWriter.Println("checking contain slots:", expectedSlots, "for pod:", pod.Name)
Eventually(func() ([]string, error) {
- currentSlots, err := testsUtils.GetReplicationSlotsOnPod(namespace, pod.GetName(), env)
+ currentSlots, err := replicationslot.GetReplicationSlotsOnPod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, pod.GetName(), postgres.AppDBName)
return currentSlots, err
}, 300).Should(ContainElements(expectedSlots),
func() string {
- return testsUtils.PrintReplicationSlots(namespace, clusterName, env)
+ return replicationslot.PrintReplicationSlots(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName)
})
GinkgoWriter.Println("executing replication slot assertion query on pod", pod.Name)
@@ -2705,17 +2831,20 @@ func AssertReplicationSlotsOnPod(
"AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnPrimary)
}
Eventually(func() (string, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
return strings.TrimSpace(stdout), err
}, 300).Should(BeEquivalentTo("t"),
func() string {
- return testsUtils.PrintReplicationSlots(namespace, clusterName, env)
+ return replicationslot.PrintReplicationSlots(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName)
})
}
}
@@ -2726,19 +2855,23 @@ func AssertClusterReplicationSlotsAligned(
namespace,
clusterName string,
) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
var lsnList []string
for _, pod := range podList.Items {
- out, err := testsUtils.GetReplicationSlotLsnsOnPod(namespace, clusterName, pod, env)
+ out, err := replicationslot.GetReplicationSlotLsnsOnPod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName, pod)
Expect(err).ToNot(HaveOccurred())
lsnList = append(lsnList, out...)
}
- return testsUtils.AreSameLsn(lsnList)
+ return replicationslot.AreSameLsn(lsnList)
}, 300).Should(BeEquivalentTo(true),
func() string {
- return testsUtils.PrintReplicationSlots(namespace, clusterName, env)
+ return replicationslot.PrintReplicationSlots(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName)
})
}
@@ -2746,11 +2879,12 @@ func AssertClusterReplicationSlotsAligned(
// of the cluster exist and are aligned.
func AssertClusterHAReplicationSlots(namespace, clusterName string) {
By("verifying all cluster's replication slots exist and are aligned", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
- expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(),
- env)
+ expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod(
+ env.Ctx, env.Client,
+ namespace, clusterName, pod.GetName())
Expect(err).ToNot(HaveOccurred())
AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false)
}
@@ -2761,7 +2895,7 @@ func AssertClusterHAReplicationSlots(namespace, clusterName string) {
// AssertClusterRollingRestart restarts a given cluster
func AssertClusterRollingRestart(namespace, clusterName string) {
By(fmt.Sprintf("restarting cluster %v", clusterName), func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
clusterRestarted := cluster.DeepCopy()
if clusterRestarted.Annotations == nil {
@@ -2774,14 +2908,14 @@ func AssertClusterRollingRestart(namespace, clusterName string) {
})
AssertClusterEventuallyReachesPhase(namespace, clusterName,
[]string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120)
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
}
// AssertPVCCount verifies that the cluster's healthy PVC count matches the expected value.
func AssertPVCCount(namespace, clusterName string, pvcCount, timeout int) {
By(fmt.Sprintf("verify cluster %v healthy pvc list", clusterName), func() {
Eventually(func(g Gomega) {
- cluster, _ := env.GetCluster(namespace, clusterName)
+ cluster, _ := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(cluster.Status.PVCCount).To(BeEquivalentTo(pvcCount))
pvcList := &corev1.PersistentVolumeClaimList{}
@@ -2816,7 +2950,7 @@ func AssertClusterEventuallyReachesPhase(namespace, clusterName string, phase []
// assertPredicateClusterHasPhase returns true if the Cluster's phase is contained in a given slice of phases
func assertPredicateClusterHasPhase(namespace, clusterName string, phase []string) func(g Gomega) {
return func(g Gomega) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(slices.Contains(phase, cluster.Status.Phase)).To(BeTrue())
}
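Taken together, the hunks above apply one mechanical recipe: every helper that used to hang off the shared TestingEnvironment becomes a plain function in a focused package (clusterutils, exec, storage, replicationslot, and so on), with the context, client, clientset, and REST config passed explicitly. A minimal sketch of the resulting calling convention, assuming only the signatures visible in the hunks above; the helper pendingRestartOnAnyPod and its error handling are illustrative, not part of the patch:

package e2e

import (
	"strings"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

// pendingRestartOnAnyPod mirrors the AssertPostgresNoPendingRestart loop:
// list the cluster pods, then run a query in each instance pod, passing
// the dependencies explicitly instead of calling methods on env.
func pendingRestartOnAnyPod(
	env *environment.TestingEnvironment,
	namespace, clusterName string,
) (bool, error) {
	podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
	if err != nil {
		return false, err
	}
	query := "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)"
	for _, pod := range podList.Items {
		stdout, _, err := exec.QueryInInstancePod(
			env.Ctx, env.Client, env.Interface, env.RestClientConfig,
			exec.PodLocator{Namespace: pod.Namespace, PodName: pod.Name},
			postgres.PostgresDBName,
			query)
		if err != nil {
			return false, err
		}
		if strings.TrimSpace(stdout) == "t" {
			return true, nil
		}
	}
	return false, nil
}

Making the dependencies explicit keeps the new utils packages free of the env singleton, and each call site now shows exactly which pieces of the environment it touches.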
diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go
index 65c3f8a2ed..6b335da4a3 100644
--- a/tests/e2e/backup_restore_azure_test.go
+++ b/tests/e2e/backup_restore_azure_test.go
@@ -21,7 +21,13 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -31,6 +37,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
const (
tableName = "to_restore"
)
+ AzureConfiguration := backups.NewAzureConfigurationFromEnv()
BeforeEach(func() {
if testLevelEnv.Depth < int(tests.High) {
@@ -55,23 +62,24 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
BeforeAll(func() {
const namespacePrefix = "cluster-backup-azure-blob"
var err error
- clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azureBlobSampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// The Azure Blob Storage should have been created ad-hoc for the tests.
// The credentials are retrieved from the environment variables, as we can't create
// a fixture for them
By("creating the Azure Blob Storage credentials", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
- env.AzureConfiguration.StorageAccount,
- env.AzureConfiguration.StorageKey,
- env,
+ AzureConfiguration.StorageAccount,
+ AzureConfiguration.StorageKey,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -87,22 +95,26 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
- assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
+ assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration)
By("uploading a backup", func() {
// We create a backup
- testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupFile, false,
+ testTimeouts[testUtils.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
// Verify that a file called data.tar is available on Azure Blob Storage
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar")
+ return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar")
}, 30).Should(BeNumerically(">=", 1))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
})
@@ -118,14 +130,14 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
// Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available
It("immediately starts a backup using ScheduledBackups 'immediate' option", func() {
- scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile)
Expect(err).ToNot(HaveOccurred())
AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName)
// Two data.tar files should be present: the earlier backup's plus the immediate one
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration,
+ return backups.CountFilesOnAzureBlobStorage(AzureConfiguration,
clusterName, "data.tar")
}, 30).Should(BeNumerically("==", 2))
})
@@ -138,19 +150,21 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
namespace,
clusterName,
backupFile,
- env.AzureConfiguration,
+ AzureConfiguration,
2,
currentTimestamp,
)
- assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
+ assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration)
- cluster, err := testUtils.CreateClusterFromBackupUsingPITR(
+ cluster, err := backups.CreateClusterFromBackupUsingPITR(
+ env.Ctx,
+ env.Client,
+ env.Scheme,
namespace,
restoredClusterName,
backupFile,
*currentTimestamp,
- env,
)
Expect(err).ToNot(HaveOccurred())
AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env)
@@ -158,7 +172,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
// Restore the backup in a new cluster; this also covers the case where no application database is configured
AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002")
By("deleting the restored cluster", func() {
- Expect(testUtils.DeleteObject(env, cluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed())
})
})
@@ -169,7 +183,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
It("verifies that scheduled backups can be suspended", func() {
const scheduledBackupSampleFile = fixturesDir +
"/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml"
- scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile)
Expect(err).ToNot(HaveOccurred())
By("scheduling backups", func() {
@@ -178,7 +192,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore),
// AssertScheduledBackupsImmediate creates at least two backups; we should find
// their base backups
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration,
+ return backups.CountFilesOnAzureBlobStorage(AzureConfiguration,
clusterName, "data.tar")
}, 60).Should(BeNumerically(">=", 2))
})
@@ -203,6 +217,7 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
)
currentTimestamp := new(string)
+ AzureConfiguration := backups.NewAzureConfigurationFromEnv()
BeforeEach(func() {
if testLevelEnv.Depth < int(level) {
@@ -222,23 +237,25 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
BeforeAll(func() {
const namespacePrefix = "recovery-barman-object-azure"
var err error
- clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileAzure)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// The Azure Blob Storage should have been created ad-hoc for the tests.
// The credentials are retrieved from the environment variables, as we can't create
// a fixture for them
By("creating the Azure Blob Storage credentials", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
- env.AzureConfiguration.StorageAccount,
- env.AzureConfiguration.StorageKey,
- env)
+ AzureConfiguration.StorageAccount,
+ AzureConfiguration.StorageKey,
+ )
Expect(err).ToNot(HaveOccurred())
})
@@ -246,32 +263,37 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env)
})
- It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() {
- // Write a table and some data on the "app" database
- tableLocator := TableLocator{
- Namespace: namespace,
- ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
- TableName: tableName,
- }
- AssertCreateTestData(env, tableLocator)
- assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
-
- By("backing up a cluster and verifying it exists on azure blob storage", func() {
- // Create the backup
- testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
- // Verifying file called data.tar should be available on Azure blob storage
- Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar")
- }, 30).Should(BeNumerically(">=", 1))
+ It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section",
+ func() {
+ // Write a table and some data on the "app" database
+ tableLocator := TableLocator{
+ Namespace: namespace,
+ ClusterName: clusterName,
+ DatabaseName: postgres.AppDBName,
+ TableName: tableName,
+ }
+ AssertCreateTestData(env, tableLocator)
+ assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration)
+
+ By("backing up a cluster and verifying it exists on azure blob storage", func() {
+ // Create the backup
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, sourceBackupFileAzure, false,
+ testTimeouts[testUtils.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
+ // Verify that a file called data.tar is available on Azure Blob Storage
+ Eventually(func() (int, error) {
+ return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar")
+ }, 30).Should(BeNumerically(">=", 1))
+ })
+
+ // Restoring cluster using a recovery barman object store, which is defined
+ // in the externalClusters section
+ AssertClusterRestore(namespace, externalClusterFileAzure, tableName)
})
- // Restoring cluster using a recovery barman object store, which is defined
- // in the externalClusters section
- AssertClusterRestore(namespace, externalClusterFileAzure, tableName)
- })
-
It("restores a cluster with 'PITR' from barman object using "+
"'barmanObjectStore' option in 'externalClusters' section", func() {
externalClusterName := "external-cluster-azure-pitr"
@@ -280,20 +302,22 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
namespace,
clusterName,
sourceBackupFileAzurePITR,
- env.AzureConfiguration,
+ AzureConfiguration,
1,
currentTimestamp,
)
- restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure(
+ restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzure(
+ env.Ctx,
+ env.Client,
namespace,
externalClusterName,
clusterName,
*currentTimestamp,
"backup-storage-creds",
- env.AzureConfiguration.StorageAccount,
- env.AzureConfiguration.BlobContainer,
- env)
+ AzureConfiguration.StorageAccount,
+ AzureConfiguration.BlobContainer,
+ )
Expect(err).ToNot(HaveOccurred())
// Restoring cluster using a recovery barman object store, which is defined
@@ -306,7 +330,7 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
)
By("delete restored cluster", func() {
- Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed())
})
})
})
@@ -319,22 +343,23 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
}
const namespacePrefix = "cluster-backup-azure-blob-sas"
var err error
- clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileAzureSAS)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// The Azure Blob Storage should have been created ad hoc for the tests;
// we get the credentials from the environment variables as we can't create
// a fixture for them
By("creating the Azure Blob Container SAS Token credentials", func() {
- err = testUtils.CreateSASTokenCredentials(
+ err = backups.CreateSASTokenCredentials(
+ env.Ctx,
+ env.Client,
namespace,
- env.AzureConfiguration.StorageAccount,
- env.AzureConfiguration.StorageKey,
- env,
+ AzureConfiguration.StorageAccount,
+ AzureConfiguration.StorageKey,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -343,34 +368,39 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env)
})
- It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() {
- // Write a table and some data on the "app" database
- tableLocator := TableLocator{
- Namespace: namespace,
- ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
- TableName: tableName,
- }
- AssertCreateTestData(env, tableLocator)
-
- // Create a WAL on the primary and check if it arrives in the
- // Azure Blob Storage within a short time
- assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
-
- By("backing up a cluster and verifying it exists on azure blob storage", func() {
- // We create a Backup
- testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
- // Verifying file called data.tar should be available on Azure blob storage
- Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar")
- }, 30).Should(BeNumerically(">=", 1))
+ It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section",
+ func() {
+ // Write a table and some data on the "app" database
+ tableLocator := TableLocator{
+ Namespace: namespace,
+ ClusterName: clusterName,
+ DatabaseName: postgres.AppDBName,
+ TableName: tableName,
+ }
+ AssertCreateTestData(env, tableLocator)
+
+ // Create a WAL on the primary and check if it arrives in the
+ // Azure Blob Storage within a short time
+ assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration)
+
+ By("backing up a cluster and verifying it exists on azure blob storage", func() {
+ // We create a Backup
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, sourceBackupFileAzureSAS, false,
+ testTimeouts[testUtils.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
+ // Verify that a file called data.tar is available on Azure Blob Storage
+ Eventually(func() (int, error) {
+ return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar")
+ }, 30).Should(BeNumerically(">=", 1))
+ })
+
+ // Restore backup in a new cluster
+ AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName)
})
- // Restore backup in a new cluster
- AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName)
- })
-
It("restores a cluster with 'PITR' from barman object using "+
"'barmanObjectStore' option in 'externalClusters' section", func() {
externalClusterName := "external-cluster-azure-pitr"
@@ -379,20 +409,22 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
namespace,
clusterName,
sourceBackupFileAzurePITRSAS,
- env.AzureConfiguration,
+ AzureConfiguration,
1,
currentTimestamp,
)
- restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure(
+ restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzure(
+ env.Ctx,
+ env.Client,
namespace,
externalClusterName,
clusterName,
*currentTimestamp,
"backup-storage-creds-sas",
- env.AzureConfiguration.StorageAccount,
- env.AzureConfiguration.BlobContainer,
- env)
+ AzureConfiguration.StorageAccount,
+ AzureConfiguration.BlobContainer,
+ )
Expect(err).ToNot(HaveOccurred())
// Restoring cluster using a recovery barman object store, which is defined
@@ -405,17 +437,17 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes
)
By("delete restored cluster", func() {
- Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed())
})
})
})
})
})
-func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testUtils.AzureConfiguration) {
+func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration backups.AzureConfiguration) {
// Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time
By("archiving WALs and verifying they exist", func() {
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name)
// Define what file we are looking for in Azure.
@@ -423,7 +455,7 @@ func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration te
path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL)
// Verifying on blob storage using az
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path)
+ return backups.CountFilesOnAzureBlobStorage(configuration, clusterName, path)
}, 60).Should(BeEquivalentTo(1))
})
}
@@ -432,19 +464,23 @@ func prepareClusterForPITROnAzureBlob(
namespace string,
clusterName string,
backupSampleFile string,
- azureConfig testUtils.AzureConfiguration,
+ azureConfig backups.AzureConfiguration,
expectedVal int,
currentTimestamp *string,
) {
const tableNamePitr = "for_restore"
By("backing up a cluster and verifying it exists on Azure Blob", func() {
- testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupSampleFile, false,
+ testTimeouts[testUtils.BackupIsReady],
+ )
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar")
+ return backups.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar")
}, 30).Should(BeEquivalentTo(expectedVal))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
@@ -454,23 +490,29 @@ func prepareClusterForPITROnAzureBlob(
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableNamePitr,
}
AssertCreateTestData(env, tableLocator)
By("getting currentTimestamp", func() {
- ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+ ts, err := postgres.GetCurrentTimestamp(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName,
+ )
*currentTimestamp = ts
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -480,7 +522,7 @@ func prepareClusterForPITROnAzureBlob(
Expect(err).ToNot(HaveOccurred())
insertRecordIntoTable(tableNamePitr, 3, conn)
})
- assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
+ assertArchiveWalOnAzureBlob(namespace, clusterName, azureConfig)
AssertArchiveConditionMet(namespace, clusterName, "5m")
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
}
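The same treatment applies to configuration: the Azure suites above no longer read env.AzureConfiguration but build their own value with backups.NewAzureConfigurationFromEnv() and thread it through helpers such as assertArchiveWalOnAzureBlob and prepareClusterForPITROnAzureBlob. A minimal sketch of that flow, assuming the signatures shown in the hunks above; setupAzureCredentials is an illustrative wrapper, not part of the patch:

package e2e

import (
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
)

// setupAzureCredentials builds the Azure configuration from environment
// variables once per suite and creates the object storage secret the
// cluster fixtures reference, mirroring the BeforeAll blocks above.
func setupAzureCredentials(
	env *environment.TestingEnvironment,
	namespace string,
) (backups.AzureConfiguration, error) {
	cfg := backups.NewAzureConfigurationFromEnv()
	_, err := secrets.CreateObjectStorageSecret(
		env.Ctx,
		env.Client,
		namespace,
		"backup-storage-creds",
		cfg.StorageAccount,
		cfg.StorageKey,
	)
	return cfg, err
}

Building the configuration per suite keeps credential handling local to the tests that need it, rather than parked on the shared environment for its whole lifetime.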
diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go
index cb3254c5a3..8b4e8f47ae 100644
--- a/tests/e2e/backup_restore_azurite_test.go
+++ b/tests/e2e/backup_restore_azurite_test.go
@@ -21,7 +21,12 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -61,17 +66,20 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore)
BeforeAll(func() {
const namespacePrefix = "cluster-backup-azurite"
var err error
- clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azuriteBlobSampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create and assert ca and tls certificate secrets on Azurite
By("creating ca and tls certificate secrets", func() {
- err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName,
- azuriteCaSecName, azuriteTLSSecName, env)
+ err := backups.CreateCertificateSecretsOnAzurite(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ azuriteCaSecName, azuriteTLSSecName,
+ )
Expect(err).ToNot(HaveOccurred())
})
// Set up Azurite and az cli along with the PostgreSQL cluster
@@ -86,7 +94,7 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore)
// Create a scheduled backup with the 'immediate' option enabled.
// We expect the backup to be available
It("immediately starts a backup using ScheduledBackups immediate option", func() {
- scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupImmediateSampleFile)
Expect(err).ToNot(HaveOccurred())
AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName)
@@ -94,7 +102,7 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore)
// AssertScheduledBackupsImmediate creates at least two backups; we should find
// their base backups
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
+ return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
}, 30).Should(BeNumerically("==", 2))
})
@@ -107,12 +115,14 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore)
prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp)
- cluster, err := testUtils.CreateClusterFromBackupUsingPITR(
+ cluster, err := backups.CreateClusterFromBackupUsingPITR(
+ env.Ctx,
+ env.Client,
+ env.Scheme,
namespace,
restoredClusterName,
backupFilePITR,
*currentTimestamp,
- env,
)
Expect(err).NotTo(HaveOccurred())
AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env)
@@ -121,7 +131,7 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore)
AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002")
By("deleting the restored cluster", func() {
- Expect(testUtils.DeleteObject(env, cluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed())
})
})
@@ -130,13 +140,13 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore)
// We then patch it again back to its initial state and verify that
// the number of backups keeps increasing again
It("verifies that scheduled backups can be suspended", func() {
- scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile)
Expect(err).ToNot(HaveOccurred())
By("scheduling backups", func() {
AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300)
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
+ return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
}, 60).Should(BeNumerically(">=", 3))
})
@@ -164,21 +174,23 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
}
const namespacePrefix = "recovery-barman-object-azurite"
var err error
- clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azuriteBlobSampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create and assert CA and TLS certificate secrets on Azurite
By("creating ca and tls certificate secrets", func() {
- err := testUtils.CreateCertificateSecretsOnAzurite(
+ err := backups.CreateCertificateSecretsOnAzurite(
+ env.Ctx,
+ env.Client,
namespace,
clusterName,
azuriteCaSecName,
azuriteTLSSecName,
- env)
+ )
Expect(err).ToNot(HaveOccurred())
})
// Set up Azurite and az cli along with the PostgreSQL cluster
@@ -206,8 +218,9 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp)
// Create a cluster from a particular time using external backup.
- restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite(
- namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env)
+ restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzurite(
+ env.Ctx, env.Client,
+ namespace, externalClusterRestoreName, clusterName, *currentTimestamp)
Expect(err).NotTo(HaveOccurred())
AssertClusterWasRestoredWithPITRAndApplicationDB(
@@ -218,7 +231,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
)
By("delete restored cluster", func() {
- Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed())
})
})
})
@@ -226,20 +239,20 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) {
By("creating the Azurite storage credentials", func() {
- err := testUtils.CreateStorageCredentialsOnAzurite(namespace, env)
+ err := backups.CreateStorageCredentialsOnAzurite(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
})
By("setting up Azurite to hold the backups", func() {
// Deploying azurite for blob storage
- err := testUtils.InstallAzurite(namespace, env)
+ err := backups.InstallAzurite(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
})
By("setting up az-cli", func() {
// This is required as we have an Azurite service running locally.
// In order to connect, we need az cli inside the namespace
- err := testUtils.InstallAzCli(namespace, env)
+ err := backups.InstallAzCli(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
})
@@ -262,7 +275,7 @@ func prepareClusterBackupOnAzurite(
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -270,18 +283,22 @@ func prepareClusterBackupOnAzurite(
By("backing up a cluster and verifying it exists on azurite", func() {
// We create a Backup
- testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupFile, false,
+ testTimeouts[testUtils.BackupIsReady],
+ )
// Verify that a file called data.tar is available on Azurite blob storage
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
+ return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
}, 30).Should(BeNumerically(">=", 1))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
})
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
}
func prepareClusterForPITROnAzurite(
@@ -292,13 +309,17 @@ func prepareClusterForPITROnAzurite(
) {
By("backing up a cluster and verifying it exists on azurite", func() {
// We create a Backup
- testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupSampleFile, false,
+ testTimeouts[testUtils.BackupIsReady],
+ )
// Verify that a file called data.tar is available on Azurite blob storage
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
+ return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
}, 30).Should(BeNumerically(">=", 1))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
@@ -308,23 +329,29 @@ func prepareClusterForPITROnAzurite(
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "for_restore",
}
AssertCreateTestData(env, tableLocator)
By("getting currentTimestamp", func() {
- ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+ ts, err := postgres.GetCurrentTimestamp(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName,
+ )
*currentTimestamp = ts
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -347,7 +374,7 @@ func assertArchiveWalOnAzurite(namespace, clusterName string) {
path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL)
// verifying on blob storage using az
Eventually(func() (int, error) {
- return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path)
+ return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path)
}, 60).Should(BeEquivalentTo(1))
})
}
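Both PITR preparation helpers above write their third row through postgres.ForwardPSQLConnection, which likewise takes the context, client, clientset, and REST config explicitly. A minimal sketch of that step, reusing the suite's insertRecordIntoTable helper seen in the hunks; the deferred cleanup body is an assumption (the patch elides it), and forward.Close is an assumed method name:

package e2e

import (
	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

// writeThirdEntry opens a psql connection to the cluster's "app" database
// through a port-forward, inserts a row, and tears the forward down.
func writeThirdEntry(
	env *environment.TestingEnvironment,
	namespace, clusterName, tableName string,
) error {
	forward, conn, err := postgres.ForwardPSQLConnection(
		env.Ctx,
		env.Client,
		env.Interface,
		env.RestClientConfig,
		namespace,
		clusterName,
		postgres.AppDBName,
		apiv1.ApplicationUserSecretSuffix,
	)
	if err != nil {
		return err
	}
	defer func() {
		// Assumed cleanup: the patch elides the deferred body; closing the
		// SQL connection and stopping the port-forward is the expected shape.
		_ = conn.Close()
		forward.Close()
	}()
	// insertRecordIntoTable is the suite helper used at this point in the
	// hunks above.
	insertRecordIntoTable(tableName, 3, conn)
	return nil
}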
diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go
index 41ada349f0..4f4a611d1d 100644
--- a/tests/e2e/backup_restore_minio_test.go
+++ b/tests/e2e/backup_restore_minio_test.go
@@ -24,8 +24,16 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -60,10 +68,10 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
}
const namespacePrefix = "cluster-backup-minio"
var err error
- clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("create the certificates for MinIO", func() {
@@ -72,12 +80,13 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
})
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -89,7 +98,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env)
By("verify test connectivity to minio using barman-cloud-wal-archive script", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive(
@@ -113,9 +122,9 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template"
)
var backup *apiv1.Backup
- restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile)
+ restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile)
Expect(err).ToNot(HaveOccurred())
- backupName, err := env.GetResourceNameFromYAML(backupFile)
+ backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupFile)
Expect(err).ToNot(HaveOccurred())
// Create required test data
AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName)
@@ -126,7 +135,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -137,28 +146,28 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
// There should be a backup resource, and its base backup files should be visible on MinIO
By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar),
func() {
- backup = testUtils.ExecuteBackup(namespace, backupFile, false,
- testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backup = backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, backupFile, false,
+ testTimeouts[timeouts.BackupIsReady])
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
return cluster.Status.LastSuccessfulBackup, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
@@ -169,14 +178,17 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
By("verifying the backup is using the expected barman-cloud-backup options", func() {
Expect(backup).ToNot(BeNil())
Expect(backup.Status.InstanceID).ToNot(BeNil())
- logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env)
+ logEntries, err := logs.ParseJSONLogs(
+ env.Ctx, env.Interface, namespace,
+ backup.Status.InstanceID.PodName,
+ )
Expect(err).ToNot(HaveOccurred())
expectedBaseBackupOptions := []string{
"--immediate-checkpoint",
"--min-chunk-size=5MB",
"--read-timeout=59",
}
- result, err := testUtils.CheckOptionsForBarmanCommand(
+ result, err := logs.CheckOptionsForBarmanCommand(
logEntries,
barmanCloudBackupLogEntry,
backup.Name,
@@ -201,7 +213,11 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
err = env.Client.Delete(env.Ctx, backup)
Expect(err).ToNot(HaveOccurred())
// create a second backup
- testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupFile, false,
+ testTimeouts[timeouts.BackupIsReady],
+ )
latestTar = minio.GetFilePath(clusterName, "data.tar")
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
@@ -214,7 +230,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
ctrlclient.ObjectKey{Namespace: namespace, Name: backupName},
backup)
Expect(err).ToNot(HaveOccurred())
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// We know that our current images always contain the latest barman version
if cluster.ShouldForceLegacyBackup() {
@@ -227,7 +243,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
// Restore the backup in a new cluster; this also covers the case where no application database is configured
AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName)
- cluster, err := env.GetCluster(namespace, restoredClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName)
Expect(err).ToNot(HaveOccurred())
AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster)
@@ -265,7 +281,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml"
)
- targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile)
+ targetClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioStandbySampleFile)
Expect(err).ToNot(HaveOccurred())
// Create the cluster with custom serverName in the backup spec
@@ -280,7 +296,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: targetClusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -291,13 +307,17 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
// There should be a backup resource, and its base backup files should be visible on MinIO
By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v",
latestTar), func() {
- testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupStandbyFile, true,
+ testTimeouts[timeouts.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, targetClusterName)
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, targetClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName)
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
})
@@ -315,7 +335,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml"
)
- targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile)
+ targetClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile)
Expect(err).ToNot(HaveOccurred())
// Create the cluster with custom serverName in the backup spec
@@ -330,7 +350,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: targetClusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -341,14 +361,17 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
// There should be a backup resource, and its base backup files should be visible on MinIO
By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+
" backup path is %v", latestTar), func() {
- testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady],
- env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupWithTargetFile, true,
+ testTimeouts[timeouts.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, targetClusterName)
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, targetClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName)
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
})
@@ -375,7 +398,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
clusterServerName = "pg-backup-minio-Custom-Name"
)
- customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile)
+ customClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioCustomSampleFile)
Expect(err).ToNot(HaveOccurred())
// Create the cluster with custom serverName in the backup spec
@@ -390,7 +413,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: customClusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -399,8 +422,12 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
// There should be a backup resource, and its base backup files should be visible on MinIO
By("backing up a cluster and verifying it exists on minio", func() {
- testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, customClusterName)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupFileCustom, false,
+ testTimeouts[timeouts.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, customClusterName)
latestBaseTar := minio.GetFilePath(clusterServerName, "data.tar")
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestBaseTar)
@@ -408,7 +435,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar))
// this is the second backup we take on the bucket
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, customClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, customClusterName)
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
})
@@ -431,7 +458,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
It("immediately starts a backup using ScheduledBackups 'immediate' option", func() {
const scheduledBackupSampleFile = fixturesDir +
"/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml"
- scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile)
Expect(err).ToNot(HaveOccurred())
AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName)
@@ -458,21 +485,23 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
currentTimestamp,
)
- cluster, err := testUtils.CreateClusterFromBackupUsingPITR(
+ cluster, err := backups.CreateClusterFromBackupUsingPITR(
+ env.Ctx,
+ env.Client,
+ env.Scheme,
namespace,
restoredClusterName,
backupFilePITR,
*currentTimestamp,
- env,
)
Expect(err).NotTo(HaveOccurred())
- AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReady], env)
// Restore the backup into a new cluster, also covering the case where no application database is configured
AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003")
By("deleting the restored cluster", func() {
- Expect(testUtils.DeleteObject(env, cluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed())
})
})
@@ -483,7 +512,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
It("verifies that scheduled backups can be suspended", func() {
const scheduledBackupSampleFile = fixturesDir +
"/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml"
- scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+ scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile)
Expect(err).ToNot(HaveOccurred())
By("scheduling backups", func() {
@@ -504,14 +533,14 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore),
Expect(err).ToNot(HaveOccurred())
Expect(tags.Tags).ToNot(BeEmpty())
- currentPrimary, err := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
oldPrimary := currentPrimary.GetName()
// Force-delete the primary
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete)
+ err = pods.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete)
Expect(err).ToNot(HaveOccurred())
AssertNewPrimary(namespace, clusterName, oldPrimary)
@@ -551,19 +580,20 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
}
const namespacePrefix = "recovery-barman-object-minio"
var err error
- clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileMinio)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -577,7 +607,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env)
By("verify test connectivity to minio using barman-cloud-wal-archive script", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive(
@@ -592,14 +622,14 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section",
func() {
- externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio)
+ externalClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, externalClusterFileMinio)
Expect(err).ToNot(HaveOccurred())
// Write a table and some data on the "app" database
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -608,12 +638,13 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
// There should be a backup resource and
By("backing up a cluster and verifying it exists on minio", func() {
- testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false,
- testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeFirstBackupFileMinio,
+ false,
+ testTimeouts[timeouts.BackupIsReady])
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
// TODO: this is to force a CHECKPOINT when we run the backup on standby.
- // This should be better handled inside ExecuteBackup
+ // This should be better handled inside Execute
AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
latestTar := minio.GetFilePath(clusterName, "data.tar")
@@ -622,7 +653,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
}, 60).Should(BeEquivalentTo(1),
fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
@@ -638,7 +669,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
tableLocator = TableLocator{
Namespace: namespace,
ClusterName: externalClusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
@@ -657,16 +688,22 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
// We have already written 2 rows in the test table 'to_restore' in the test above; now we take the
// current timestamp, which will be used to restore the cluster from the source using PITR
By("getting currentTimestamp", func() {
- ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+ ts, err := postgres.GetCurrentTimestamp(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName,
+ )
*currentTimestamp = ts
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
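+ // ForwardPSQLConnection opens a port-forward to the cluster and returns the
+ // forwarder together with a *sql.DB that connects through it.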
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -679,9 +716,10 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
insertRecordIntoTable(tableName, 4, conn)
})
By("creating second backup and verifying it exists on minio", func() {
- testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false,
- testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeSecondBackupFileMinio,
+ false,
+ testTimeouts[timeouts.BackupIsReady])
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
latestTar := minio.GetFilePath(clusterName, "data.tar")
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
@@ -691,8 +729,9 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
var restoredCluster *apiv1.Cluster
By("create a cluster from backup with PITR", func() {
var err error
- restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio(
- namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env)
+ restoredCluster, err = backups.CreateClusterFromExternalClusterBackupWithPITROnMinio(
+ env.Ctx, env.Client,
+ namespace, externalClusterRestoreName, clusterName, *currentTimestamp)
Expect(err).NotTo(HaveOccurred())
})
AssertClusterWasRestoredWithPITRAndApplicationDB(
@@ -702,7 +741,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
"00000002",
)
By("delete restored cluster", func() {
- Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed())
})
})
@@ -711,7 +750,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "for_restore_repl",
}
AssertCreateTestData(env, tableLocator)
@@ -719,9 +758,9 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes
AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
By("backing up a cluster and verifying it exists on minio", func() {
- testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false,
- testTimeouts[testUtils.BackupIsReady], env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeThirdBackupFileMinio, false,
+ testTimeouts[timeouts.BackupIsReady])
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
latestTar := minio.GetFilePath(clusterName, "data.tar")
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
@@ -750,7 +789,11 @@ func prepareClusterForPITROnMinio(
const tableNamePitr = "for_restore"
By("backing up a cluster and verifying it exists on minio", func() {
- testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, backupSampleFile, false,
+ testTimeouts[timeouts.BackupIsReady],
+ )
latestTar := minio.GetFilePath(clusterName, "data.tar")
Eventually(func() (int, error) {
return minio.CountFiles(minioEnv, latestTar)
@@ -758,7 +801,7 @@ func prepareClusterForPITROnMinio(
fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar,
expectedVal))
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
@@ -768,23 +811,29 @@ func prepareClusterForPITROnMinio(
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableNamePitr,
}
AssertCreateTestData(env, tableLocator)
By("getting currentTimestamp", func() {
- ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+ ts, err := postgres.GetCurrentTimestamp(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName,
+ )
*currentTimestamp = ts
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -796,5 +845,5 @@ func prepareClusterForPITROnMinio(
})
AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
AssertArchiveConditionMet(namespace, clusterName, "5m")
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
}
diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go
index 5cd0f173d7..a12c885059 100644
--- a/tests/e2e/certificates_test.go
+++ b/tests/e2e/certificates_test.go
@@ -17,12 +17,21 @@ limitations under the License.
package e2e
import (
+ "context"
"fmt"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
+ "k8s.io/utils/ptr"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -41,6 +50,95 @@ import (
// from an application, by using certificates that have been created by 'kubectl-cnpg'
// Then we verify that the server certificate and the operator are able to handle the provided server certificates
var _ = Describe("Certificates", func() {
+ createClientCertificatesViaKubectlPluginFunc := func(
+ ctx context.Context,
+ crudClient ctrlclient.Client,
+ cluster apiv1.Cluster,
+ certName string,
+ userName string,
+ ) error {
+ // Example arguments: certName "cluster-cert", userName "app".
+ // Create the certificate
+ _, _, err := run.Run(fmt.Sprintf(
+ "kubectl cnpg certificate %v --cnpg-cluster %v --cnpg-user %v -n %v",
+ certName,
+ cluster.Name,
+ userName,
+ cluster.Namespace))
+ if err != nil {
+ return err
+ }
+ // Verify that the client certificate secret exists
+ secret := &corev1.Secret{}
+ return crudClient.Get(ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: certName}, secret)
+ }
+
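+ // defaultPodFunc builds a webtest pod that mounts the given root CA and TLS
+ // secrets (mode 0600) under /etc/secrets, so the test can attempt TLS
+ // connections to PostgreSQL from an application pod.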
+ defaultPodFunc := func(namespace string, name string, rootCASecretName string, tlsSecretName string) corev1.Pod {
+ var secretMode int32 = 0o600
+ seccompProfile := &corev1.SeccompProfile{
+ Type: corev1.SeccompProfileTypeRuntimeDefault,
+ }
+
+ return corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ Spec: corev1.PodSpec{
+ Volumes: []corev1.Volume{
+ {
+ Name: "secret-volume-root-ca",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: rootCASecretName,
+ DefaultMode: &secretMode,
+ },
+ },
+ },
+ {
+ Name: "secret-volume-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: tlsSecretName,
+ DefaultMode: &secretMode,
+ },
+ },
+ },
+ },
+ Containers: []corev1.Container{
+ {
+ Name: name,
+ Image: "ghcr.io/cloudnative-pg/webtest:1.6.0",
+ Ports: []corev1.ContainerPort{
+ {
+ ContainerPort: 8080,
+ },
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "secret-volume-root-ca",
+ MountPath: "/etc/secrets/ca",
+ },
+ {
+ Name: "secret-volume-tls",
+ MountPath: "/etc/secrets/tls",
+ },
+ },
+ SecurityContext: &corev1.SecurityContext{
+ AllowPrivilegeEscalation: ptr.To(false),
+ SeccompProfile: seccompProfile,
+ },
+ },
+ },
+ SecurityContext: &corev1.PodSecurityContext{
+ SeccompProfile: seccompProfile,
+ },
+ },
+ }
+ }
+
const (
serverCASecretName = "my-postgresql-server-ca" // #nosec
serverCertSecretName = "my-postgresql-server" // #nosec
@@ -67,7 +165,7 @@ var _ = Describe("Certificates", func() {
cleanClusterCertification := func() {
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
cluster.Spec.Certificates.ServerTLSSecret = ""
cluster.Spec.Certificates.ServerCASecret = ""
@@ -82,20 +180,21 @@ var _ = Describe("Certificates", func() {
var err error
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "postgresql-cert"
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
// Create the client certificate
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- err = utils.CreateClientCertificatesViaKubectlPlugin(
+ err = createClientCertificatesViaKubectlPluginFunc(
+ env.Ctx,
+ env.Client,
*cluster,
kubectlCNPGClientCertSecretName,
"app",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -106,96 +205,99 @@ var _ = Describe("Certificates", func() {
It("can authenticate using a Certificate that is generated from the 'kubectl-cnpg' plugin",
Label(tests.LabelPlugin), func() {
- pod := utils.DefaultWebapp(namespace, "app-pod-cert-1",
+ pod := defaultPodFunc(namespace, "app-pod-cert-1",
defaultCASecretName, kubectlCNPGClientCertSecretName)
- err := utils.PodCreateAndWaitForReady(env, &pod, 240)
+ err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
Expect(err).ToNot(HaveOccurred())
AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
})
- It("can authenticate after switching to user-supplied server certs", Label(tests.LabelServiceConnectivity), func() {
- CreateAndAssertServerCertificatesSecrets(
- namespace,
- clusterName,
- serverCASecretName,
- serverCertSecretName,
- false,
- )
+ It("can authenticate after switching to user-supplied server certs", Label(tests.LabelServiceConnectivity),
+ func() {
+ CreateAndAssertServerCertificatesSecrets(
+ namespace,
+ clusterName,
+ serverCASecretName,
+ serverCertSecretName,
+ false,
+ )
- var err error
- // Updating defaults certificates entries with user provided certificates,
- // i.e server CA and TLS secrets inside the cluster
- Eventually(func() error {
- _, _, err = utils.RunUnchecked(fmt.Sprintf(
- "kubectl patch cluster %v -n %v -p "+
- "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+
- "\"serverTLSSecret\":\"%v\"}}}'"+
- " --type='merge'", clusterName, namespace, serverCASecretName, serverCertSecretName))
- if err != nil {
- return err
- }
- return nil
- }, 60, 5).Should(Succeed())
-
- Eventually(func() (bool, error) {
- certUpdateStatus := false
- cluster, err := env.GetCluster(namespace, clusterName)
- if cluster.Status.Certificates.ServerCASecret == serverCASecretName {
- if cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName {
- certUpdateStatus = true
+ var err error
+ // Updating the default certificate entries with user-provided certificates,
+ // i.e. the server CA and TLS secrets inside the cluster
+ Eventually(func() error {
+ _, _, err = run.Unchecked(fmt.Sprintf(
+ "kubectl patch cluster %v -n %v -p "+
+ "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+
+ "\"serverTLSSecret\":\"%v\"}}}'"+
+ " --type='merge'", clusterName, namespace, serverCASecretName, serverCertSecretName))
+ if err != nil {
+ return err
}
- }
- return certUpdateStatus, err
- }, 120).Should(BeTrue(), fmt.Sprintf("Error: %v", err))
+ return nil
+ }, 60, 5).Should(Succeed())
- pod := utils.DefaultWebapp(
- namespace,
- "app-pod-cert-2",
- serverCASecretName,
- kubectlCNPGClientCertSecretName,
- )
- err = utils.PodCreateAndWaitForReady(env, &pod, 240)
- Expect(err).ToNot(HaveOccurred())
- AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
- })
+ Eventually(func() (bool, error) {
+ certUpdateStatus := false
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+ if cluster.Status.Certificates.ServerCASecret == serverCASecretName {
+ if cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName {
+ certUpdateStatus = true
+ }
+ }
+ return certUpdateStatus, err
+ }, 120).Should(BeTrue(), fmt.Sprintf("Error: %v", err))
- It("can connect after switching to user-supplied client certificates", Label(tests.LabelServiceConnectivity), func() {
- // Create certificates secret for client
- CreateAndAssertClientCertificatesSecrets(namespace, clusterName, clientCASecretName, replicaCertSecretName,
- clientCertSecretName, false)
-
- // Updating defaults certificates entries with user provided certificates,
- // i.e client CA and TLS secrets inside the cluster
- Eventually(func() error {
- _, _, err := utils.RunUnchecked(fmt.Sprintf(
- "kubectl patch cluster %v -n %v -p "+
- "'{\"spec\":{\"certificates\":{\"clientCASecret\":\"%v\","+
- "\"replicationTLSSecret\":\"%v\"}}}'"+
- " --type='merge'", clusterName, namespace, clientCASecretName, replicaCertSecretName))
- if err != nil {
- return err
- }
- return nil
- }, 60, 5).Should(Succeed())
-
- Eventually(func() (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
- return cluster.Spec.Certificates.ClientCASecret == clientCASecretName &&
- cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err
- }, 120, 5).Should(BeTrue())
-
- pod := utils.DefaultWebapp(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName)
- err := utils.PodCreateAndWaitForReady(env, &pod, 240)
- Expect(err).ToNot(HaveOccurred())
- AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
- })
+ pod := defaultPodFunc(
+ namespace,
+ "app-pod-cert-2",
+ serverCASecretName,
+ kubectlCNPGClientCertSecretName,
+ )
+ err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
+ Expect(err).ToNot(HaveOccurred())
+ AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
+ })
+
+ It("can connect after switching to user-supplied client certificates", Label(tests.LabelServiceConnectivity),
+ func() {
+ // Create certificates secret for client
+ CreateAndAssertClientCertificatesSecrets(namespace, clusterName, clientCASecretName,
+ replicaCertSecretName,
+ clientCertSecretName, false)
+
+ // Updating the default certificate entries with user-provided certificates,
+ // i.e. the client CA and TLS secrets inside the cluster
+ Eventually(func() error {
+ _, _, err := run.Unchecked(fmt.Sprintf(
+ "kubectl patch cluster %v -n %v -p "+
+ "'{\"spec\":{\"certificates\":{\"clientCASecret\":\"%v\","+
+ "\"replicationTLSSecret\":\"%v\"}}}'"+
+ " --type='merge'", clusterName, namespace, clientCASecretName, replicaCertSecretName))
+ if err != nil {
+ return err
+ }
+ return nil
+ }, 60, 5).Should(Succeed())
+
+ Eventually(func() (bool, error) {
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+ return cluster.Spec.Certificates.ClientCASecret == clientCASecretName &&
+ cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err
+ }, 120, 5).Should(BeTrue())
+
+ pod := defaultPodFunc(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName)
+ err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
+ Expect(err).ToNot(HaveOccurred())
+ AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
+ })
It("can connect after switching both server and client certificates to user-supplied mode",
Label(tests.LabelServiceConnectivity), func() {
// Updating the default certificate entries with user-provided certificates,
// i.e. the server and client CA and TLS secrets inside the cluster
Eventually(func() error {
- _, _, err := utils.RunUnchecked(fmt.Sprintf(
+ _, _, err := run.Unchecked(fmt.Sprintf(
"kubectl patch cluster %v -n %v -p "+
"'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+
"\"serverTLSSecret\":\"%v\",\"clientCASecret\":\"%v\","+
@@ -215,15 +317,15 @@ var _ = Describe("Certificates", func() {
}, 60, 5).Should(Succeed())
Eventually(func() (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.Certificates.ServerCASecret == serverCASecretName &&
cluster.Status.Certificates.ClientCASecret == clientCASecretName &&
cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName &&
cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err
}, 120, 5).Should(BeTrue())
- pod := utils.DefaultWebapp(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName)
- err := utils.PodCreateAndWaitForReady(env, &pod, 240)
+ pod := defaultPodFunc(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName)
+ err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
Expect(err).ToNot(HaveOccurred())
AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
})
@@ -242,7 +344,7 @@ var _ = Describe("Certificates", func() {
var err error
// Create a cluster in a namespace that will be deleted after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
CreateAndAssertServerCertificatesSecrets(
namespace,
@@ -252,23 +354,24 @@ var _ = Describe("Certificates", func() {
false,
)
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- err = utils.CreateClientCertificatesViaKubectlPlugin(
+ err = createClientCertificatesViaKubectlPluginFunc(
+ env.Ctx,
+ env.Client,
*cluster,
kubectlCNPGClientCertSecretName,
"app",
- env,
)
Expect(err).ToNot(HaveOccurred())
- pod := utils.DefaultWebapp(
+ pod := defaultPodFunc(
namespace,
"app-pod-cert-2",
serverCASecretName,
kubectlCNPGClientCertSecretName,
)
- err = utils.PodCreateAndWaitForReady(env, &pod, 240)
+ err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
Expect(err).ToNot(HaveOccurred())
AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
})
@@ -287,7 +390,7 @@ var _ = Describe("Certificates", func() {
var err error
// Create a cluster in a namespace that will be deleted after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create certificates secret for client
@@ -300,8 +403,8 @@ var _ = Describe("Certificates", func() {
false,
)
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- pod := utils.DefaultWebapp(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName)
- err = utils.PodCreateAndWaitForReady(env, &pod, 240)
+ pod := defaultPodFunc(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName)
+ err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
Expect(err).ToNot(HaveOccurred())
AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
})
@@ -320,7 +423,7 @@ var _ = Describe("Certificates", func() {
// Create a cluster in a namespace that will be deleted after the test
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create certificates secret for server
@@ -341,8 +444,8 @@ var _ = Describe("Certificates", func() {
false,
)
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- pod := utils.DefaultWebapp(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName)
- err = utils.PodCreateAndWaitForReady(env, &pod, 240)
+ pod := defaultPodFunc(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName)
+ err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240)
Expect(err).ToNot(HaveOccurred())
AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod)
})
diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go
index 476ea0e4aa..b48712f541 100644
--- a/tests/e2e/cluster_microservice_test.go
+++ b/tests/e2e/cluster_microservice_test.go
@@ -29,7 +29,12 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -60,19 +65,19 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin
It("can import a database with large objects", func() {
var err error
const namespacePrefix = "microservice-large-object"
- sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile)
+ sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile)
Expect(err).ToNot(HaveOccurred())
oid := 16393
data := "large object test"
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env)
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: sourceClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -83,23 +88,23 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin
tableLocator = TableLocator{
Namespace: namespace,
ClusterName: importedClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
AssertLargeObjectValue(namespace, importedClusterName, oid, data)
By("deleting the imported database", func() {
- Expect(testsUtils.DeleteObject(env, cluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed())
})
})
It("can import a database", func() {
var err error
const namespacePrefix = "microservice"
- sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile)
+ sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env)
assertCreateTableWithDataOnSourceCluster(namespace, tableName, sourceClusterName)
@@ -109,7 +114,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: importedClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
@@ -121,7 +126,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin
const namespacePrefix = "microservice-different-db"
importedClusterName = "cluster-pgdump-different-db"
// create namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
assertImportRenamesSelectedDatabase(namespace, sourceSampleFile,
importedClusterName, tableName, "")
@@ -132,9 +137,9 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin
// nonexistent database in cluster definition while importing
var err error
const namespacePrefix = "cnpg-microservice-error"
- sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile)
+ sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env)
@@ -171,14 +176,14 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin
}
// Gather the target image
- targetImage, err := testsUtils.BumpPostgresImageMajorVersion(postgresImage)
+ targetImage, err := postgres.BumpPostgresImageMajorVersion(postgresImage)
Expect(err).ToNot(HaveOccurred())
Expect(targetImage).ShouldNot(BeEmpty(), "targetImage should not be empty")
By(fmt.Sprintf("import cluster with different major, target version is %s", targetImage), func() {
var err error
// create namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
assertImportRenamesSelectedDatabase(namespace, sourceSampleFile, importedClusterName,
tableName, targetImage)
@@ -211,7 +216,7 @@ func assertCreateTableWithDataOnSourceCluster(
) {
By("create user, insert record in new table, assign new user as owner "+
"and grant read only to app user", func() {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
query := fmt.Sprintf(
@@ -222,12 +227,13 @@ func assertCreateTableWithDataOnSourceCluster(
"GRANT SELECT ON %[1]v TO app;",
tableName)
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
query)
Expect(err).ToNot(HaveOccurred())
})
@@ -240,28 +246,29 @@ func assertTableAndDataOnImportedCluster(
importedClusterName string,
) {
By("verifying presence of table and data from source in imported cluster", func() {
- pod, err := env.GetClusterPrimary(namespace, importedClusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, importedClusterName)
Expect(err).ToNot(HaveOccurred())
By("Verifying imported table has owner app user", func() {
queryImported := fmt.Sprintf(
"select * from pg_tables where tablename = '%v' and tableowner = '%v'",
tableName,
- testsUtils.AppUser,
+ postgres.AppUser,
)
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
queryImported)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, tableName), err).Should(BeTrue())
})
By("verifying the user named 'micro' on source is not in imported database", func() {
- Eventually(QueryMatchExpectationPredicate(pod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(pod, postgres.PostgresDBName,
roleExistsQuery("micro"), "f"), 30).Should(Succeed())
})
})
@@ -279,23 +286,24 @@ func assertImportRenamesSelectedDatabase(
) {
dbList := []string{"db1", "db2", "db3"}
dbToImport := dbList[1]
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("creating multiple dbs on source and set ownership to app", func() {
for _, db := range dbList {
// Create database
createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER app", db)
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
createDBQuery)
Expect(err).ToNot(HaveOccurred())
}
@@ -304,15 +312,17 @@ func assertImportRenamesSelectedDatabase(
By(fmt.Sprintf("creating table '%s' and insert records on selected db %v", tableName, dbToImport), func() {
// create a table with two records
query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName)
- _, err = testsUtils.RunExecOverForward(env, namespace, clusterName, dbToImport,
+ _, err = postgres.RunExecOverForward(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, dbToImport,
apiv1.ApplicationUserSecretSuffix, query)
Expect(err).ToNot(HaveOccurred())
})
var importedCluster *apiv1.Cluster
By("importing Database with microservice approach in a new cluster", func() {
- importedCluster, err = testsUtils.ImportDatabaseMicroservice(namespace, clusterName,
- importedClusterName, imageName, dbToImport, env)
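+ // ImportDatabaseMicroservice creates the new cluster, importing only the
+ // selected database (dbToImport) from the source cluster.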
+ importedCluster, err = importdb.ImportDatabaseMicroservice(env.Ctx, env.Client, namespace, clusterName,
+ importedClusterName, imageName, dbToImport)
Expect(err).ToNot(HaveOccurred())
// We give more time than the usual 600s, since the recovery is slower
AssertClusterIsReady(namespace, importedClusterName, 1000, env)
@@ -322,18 +332,18 @@ func assertImportRenamesSelectedDatabase(
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: importedClusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
By("verifying that only 'app' DB exists in the imported cluster", func() {
- importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName)
+ importedPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, importedClusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, postgres.PostgresDBName,
roleExistsQuery("db2"), "f"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, postgres.PostgresDBName,
roleExistsQuery("app"), "t"), 30).Should(Succeed())
})
@@ -341,6 +351,6 @@ func assertImportRenamesSelectedDatabase(
err = DeleteResourcesFromFile(namespace, sampleFile)
Expect(err).ToNot(HaveOccurred())
- Expect(testsUtils.DeleteObject(env, importedCluster)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, importedCluster)).To(Succeed())
})
}
diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go
index 34d1f3de9e..05099d1ebf 100644
--- a/tests/e2e/cluster_monolithic_test.go
+++ b/tests/e2e/cluster_monolithic_test.go
@@ -25,7 +25,10 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -52,7 +55,7 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD
)
var namespace, sourceClusterName string
- var forwardTarget *testsUtils.PSQLForwardConnection
+ var forwardTarget *postgres.PSQLForwardConnection
var connTarget *sql.DB
BeforeEach(func() {
@@ -68,19 +71,22 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD
By("creating the source cluster", func() {
const namespacePrefix = "cluster-monolith"
- sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterFile)
+ sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, sourceClusterName, sourceClusterFile, env)
})
By("creating several roles, one of them a superuser and source databases", func() {
- forward, conn, err := testsUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
sourceClusterName,
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
apiv1.SuperUserSecretSuffix,
)
defer func() {
@@ -116,13 +122,13 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD
for _, database := range sourceDatabases {
query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName)
conn, err := forward.GetPooler().Connection(database)
+ Expect(err).ToNot(HaveOccurred())
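+ // Check the connection error before conn is used: on failure conn would be
+ // nil and the calls below would panic.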
// We need to set the max idle connections back to a higher number,
// otherwise conn.Exec() will close the connection,
// and that will produce an RST packet from PostgreSQL that kills the
// port-forward tunnel.
// More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark
conn.SetMaxIdleConns(3)
- Expect(err).ToNot(HaveOccurred())
_, err = conn.Exec(query)
Expect(err).ToNot(HaveOccurred())
}
@@ -131,26 +137,33 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD
By("creating target cluster", func() {
postgresImage := os.Getenv("POSTGRES_IMG")
Expect(postgresImage).ShouldNot(BeEmpty(), "POSTGRES_IMG env should not be empty")
- expectedImageName, err := testsUtils.BumpPostgresImageMajorVersion(postgresImage)
+ expectedImageName, err := postgres.BumpPostgresImageMajorVersion(postgresImage)
Expect(err).ToNot(HaveOccurred())
Expect(expectedImageName).ShouldNot(BeEmpty(), "imageName should not be empty")
- _, err = testsUtils.ImportDatabasesMonolith(namespace,
+
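+ // ImportDatabasesMonolith creates the target cluster, importing the listed
+ // source databases and roles into it.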
+ _, err = importdb.ImportDatabasesMonolith(
+ env.Ctx,
+ env.Client,
+ namespace,
sourceClusterName,
targetClusterName,
expectedImageName,
sourceDatabases,
sourceRoles,
- env)
+ )
Expect(err).ToNot(HaveOccurred())
- AssertClusterIsReady(namespace, targetClusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, targetClusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("connect to the imported cluster", func() {
- forwardTarget, connTarget, err = testsUtils.ForwardPSQLConnection(
- env,
+ forwardTarget, connTarget, err = postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
targetClusterName,
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
apiv1.SuperUserSecretSuffix,
)
Expect(err).ToNot(HaveOccurred())
@@ -182,13 +195,13 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD
for _, database := range sourceDatabases {
selectQuery := fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName)
connTemp, err := forwardTarget.GetPooler().Connection(database)
+ Expect(err).ToNot(HaveOccurred())
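+ // As above, validate the connection error before connTemp is used.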
// We need to set the max idle connections back to a higher number,
// otherwise conn.Exec() will close the connection,
// and that will produce an RST packet from PostgreSQL that kills the
// port-forward tunnel.
// More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark
connTemp.SetMaxIdleConns(3)
- Expect(err).ToNot(HaveOccurred())
row := connTemp.QueryRow(selectQuery)
var count int
err = row.Scan(&count)
diff --git a/tests/e2e/cluster_setup_test.go b/tests/e2e/cluster_setup_test.go
index 9f2d124712..1e20854751 100644
--- a/tests/e2e/cluster_setup_test.go
+++ b/tests/e2e/cluster_setup_test.go
@@ -27,7 +27,9 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -53,13 +55,13 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
By("having three PostgreSQL pods with status ready", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(utils.CountReadyPods(podList.Items), err).Should(BeEquivalentTo(3))
})
@@ -75,11 +77,14 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
err := env.Client.Get(env.Ctx, namespacedName, pod)
Expect(err).ToNot(HaveOccurred())
- forward, conn, err := testsUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
Expect(err).NotTo(HaveOccurred())
@@ -121,13 +126,16 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
return int32(-1), nil
}, timeout).Should(BeEquivalentTo(restart + 1))
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
- forward, conn, err = testsUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err = postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testsUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -145,7 +153,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
const namespacePrefix = "cluster-conditions"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("having a %v namespace", namespace), func() {
@@ -172,7 +180,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
// scale up the cluster to verify if the cluster remains in Ready
By("scaling up the cluster size", func() {
- err := env.ScaleClusterSize(namespace, clusterName, 5)
+ err := clusterutils.ScaleSize(env.Ctx, env.Client, namespace, clusterName, 5)
Expect(err).ToNot(HaveOccurred())
})
diff --git a/tests/e2e/commons_test.go b/tests/e2e/commons_test.go
index 38dc4007f8..50b12cb1f6 100644
--- a/tests/e2e/commons_test.go
+++ b/tests/e2e/commons_test.go
@@ -16,33 +16,35 @@ limitations under the License.
package e2e
-import "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+import (
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/cloudvendors"
+)
-func MustGetEnvProfile() utils.EnvProfile {
- return utils.GetEnvProfile(*testCloudVendorEnv)
+func MustGetEnvProfile() cloudvendors.EnvProfile {
+ return cloudvendors.GetEnvProfile(*testCloudVendorEnv)
}
// IsAKS checks if the running cluster is on AKS
func IsAKS() bool {
- return *testCloudVendorEnv == utils.AKS
+ return *testCloudVendorEnv == cloudvendors.AKS
}
// IsEKS checks if the running cluster is on EKS
func IsEKS() bool {
- return *testCloudVendorEnv == utils.EKS
+ return *testCloudVendorEnv == cloudvendors.EKS
}
// IsGKE checks if the running cluster is on GKE
func IsGKE() bool {
- return *testCloudVendorEnv == utils.GKE
+ return *testCloudVendorEnv == cloudvendors.GKE
}
// IsLocal checks if the running cluster is on local
func IsLocal() bool {
- return *testCloudVendorEnv == utils.LOCAL
+ return *testCloudVendorEnv == cloudvendors.LOCAL
}
// IsOpenshift checks if the running cluster is on OpenShift
func IsOpenshift() bool {
- return *testCloudVendorEnv == utils.OCP
+ return *testCloudVendorEnv == cloudvendors.OCP
}
diff --git a/tests/e2e/config_support_test.go b/tests/e2e/config_support_test.go
index bf29de31e1..8477e3a0c2 100644
--- a/tests/e2e/config_support_test.go
+++ b/tests/e2e/config_support_test.go
@@ -24,7 +24,9 @@ import (
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -49,7 +51,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
Skip("Test depth is lower than the amount requested for this test")
}
- operatorDeployment, err := env.GetOperatorDeployment()
+ operatorDeployment, err := operator.GetDeployment(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
operatorNamespace = operatorDeployment.GetNamespace()
@@ -74,14 +76,14 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
err = env.Client.Delete(env.Ctx, secret)
Expect(err).NotTo(HaveOccurred())
- err = utils.ReloadOperatorDeployment(env, 120)
+ err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120)
Expect(err).ToNot(HaveOccurred())
})
It("creates the configuration map and secret", func() {
// create a config map where operator is deployed
cmd := fmt.Sprintf("kubectl apply -n %v -f %v", operatorNamespace, configMapFile)
- _, _, err := utils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
// Check if configmap is created
Eventually(func() ([]corev1.ConfigMap, error) {
@@ -95,7 +97,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
// create a secret where operator is deployed
cmd = fmt.Sprintf("kubectl apply -n %v -f %v", operatorNamespace, secretFile)
- _, _, err = utils.Run(cmd)
+ _, _, err = run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
// Check if the secret is created
Eventually(func() ([]corev1.Secret, error) {
@@ -108,30 +110,31 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
}, 10).Should(HaveLen(1))
// Reload the operator with the new config
- err = utils.ReloadOperatorDeployment(env, 120)
+ err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120)
Expect(err).ToNot(HaveOccurred())
})
It("creates a cluster", func() {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterWithInheritedLabelsFile, env)
})
It("verify label's and annotation's inheritance when global config-map changed", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
By("checking the cluster has the requested labels", func() {
expectedLabels := map[string]string{"environment": "qaEnv"}
- Expect(utils.ClusterHasLabels(cluster, expectedLabels)).To(BeTrue())
+ Expect(clusterutils.HasLabels(cluster, expectedLabels)).To(BeTrue())
})
By("checking the pods inherit labels matching the ones in the configuration secret", func() {
expectedLabels := map[string]string{"environment": "qaEnv"}
Eventually(func() (bool, error) {
- return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedLabels)
+ return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName,
+ expectedLabels)
}, 180).Should(BeTrue())
})
By("checking the pods inherit labels matching wildcard ones in the configuration secret", func() {
@@ -140,17 +143,19 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
"example.com/prod": "prod",
}
Eventually(func() (bool, error) {
- return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedLabels)
+ return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName,
+ expectedLabels)
}, 180).Should(BeTrue())
})
By("checking the cluster has the requested annotation", func() {
expectedAnnotations := map[string]string{"categories": "DatabaseApplication"}
- Expect(utils.ClusterHasAnnotations(cluster, expectedAnnotations)).To(BeTrue())
+ Expect(clusterutils.HasAnnotations(cluster, expectedAnnotations)).To(BeTrue())
})
By("checking the pods inherit annotations matching the ones in the configuration configMap", func() {
expectedAnnotations := map[string]string{"categories": "DatabaseApplication"}
Eventually(func() (bool, error) {
- return utils.AllClusterPodsHaveAnnotations(env, namespace, clusterName, expectedAnnotations)
+ return clusterutils.AllPodsHaveAnnotations(env.Ctx, env.Client, namespace, clusterName,
+ expectedAnnotations)
}, 180).Should(BeTrue())
})
By("checking the pods inherit annotations matching wildcard ones in the configuration configMap", func() {
@@ -159,7 +164,8 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
"example.com/prod": "prod",
}
Eventually(func() (bool, error) {
- return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedAnnotations)
+ return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName,
+ expectedAnnotations)
}, 180).Should(BeTrue())
})
})
@@ -167,7 +173,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive,
// Setting MONITORING_QUERIES_CONFIGMAP: "" should disable monitoring
// queries on new cluster. We expect those metrics to be missing.
It("verify metrics details when updated default monitoring configMap queries parameter is set to be empty", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
collectAndAssertDefaultMetricsPresentOnEachPod(namespace, clusterName, cluster.IsMetricsTLSEnabled(), false)
diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go
index 74800a23ab..4918d6d755 100644
--- a/tests/e2e/configuration_update_test.go
+++ b/tests/e2e/configuration_update_test.go
@@ -32,7 +32,10 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -65,7 +68,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
cluster := &apiv1.Cluster{}
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.Parameters = paramsMap
return env.Client.Update(env.Ctx, cluster)
@@ -77,7 +80,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
cluster := &apiv1.Cluster{}
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.PgHBA = []string{"host all all all trust"}
return env.Client.Update(env.Ctx, cluster)
@@ -89,7 +92,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
cluster := &apiv1.Cluster{}
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.PgIdent = []string{"email /^(.*)@example\\.com \\1"}
return env.Client.Update(env.Ctx, cluster)
@@ -102,26 +105,27 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
cluster := &apiv1.Cluster{}
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
cluster.Spec.PostgresConfiguration.Parameters = params
return env.Client.Update(env.Ctx, cluster)
})
Expect(apierrors.IsInvalid(err)).To(BeTrue())
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Expect other config parameters applied together with a blockedParameter to remain unchanged
for idx := range podList.Items {
pod := podList.Items[idx]
Eventually(func(g Gomega) int {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show autovacuum_max_workers")
g.Expect(err).ToNot(HaveOccurred())
@@ -140,7 +144,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
By("create cluster with default configuration", func() {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
})
@@ -148,7 +152,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
It("01. reloading Pg when a parameter requiring reload is modified", func() {
// increase max_connections to 110
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("apply configuration update", func() {
@@ -161,12 +165,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
// Check that the parameter has been modified in every pod
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show work_mem")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n"))
return value, err, atoiErr
@@ -179,11 +184,11 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
endpointName := clusterName + "-rw"
// Connection should fail now because we are not supplying a password
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("verify that connections fail by default", func() {
- _, _, err := env.ExecCommand(env.Ctx, podList.Items[0],
+ _, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, podList.Items[0],
specs.PostgresContainerName, &commandTimeout,
"psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1",
)
@@ -201,19 +206,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
query := "select count(*) from pg_hba_file_rules where type = 'host' and auth_method = 'trust'"
for _, pod := range podList.Items {
Eventually(func() (string, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
query)
return strings.Trim(stdout, "\n"), err
}, timeout).Should(BeEquivalentTo("1"))
}
// The connection should work now
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecCommand(env.Ctx, podList.Items[0],
+ stdout, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, podList.Items[0],
specs.PostgresContainerName, &commandTimeout,
"psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n"))
@@ -225,10 +231,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
It("03. restarting and switching Pg when a parameter requiring restart is modified", func() {
timeout := 300
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary))
oldPrimary := cluster.Status.CurrentPrimary
@@ -243,12 +249,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
// Check that the new parameter has been modified in every pod
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show shared_buffers")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n"))
return value, err, atoiErr
@@ -259,7 +266,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
By("verify that a switchover happened", func() {
// Check that a switchover happened
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimary, err
}, timeout).ShouldNot(BeEquivalentTo(oldPrimary))
})
@@ -267,10 +274,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
It("04. restarting and switching Pg when mixed parameters are modified", func() {
timeout := 300
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary))
oldPrimary := cluster.Status.CurrentPrimary
@@ -286,24 +293,26 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
// Check that both parameters have been modified in each pod
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show max_replication_slots")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n"))
return value, err, atoiErr
}, timeout).Should(BeEquivalentTo(16))
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show maintenance_work_mem")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n"))
return value, err, atoiErr
@@ -314,7 +323,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
By("verify that a switchover happened", func() {
// Check that a switchover happened
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimary, err
}, timeout).ShouldNot(BeEquivalentTo(oldPrimary))
})
@@ -337,10 +346,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
func() {
// decrease max_connections to 105
timeout := 300
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary))
oldPrimary := cluster.Status.CurrentPrimary
@@ -356,12 +365,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
// Check that the new parameter has been modified in every pod
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show max_connections")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n"))
return value, err, atoiErr
@@ -372,7 +382,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
By("verify that a switchover not happened", func() {
// Check that a switchover did not happen
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimary, err
}, timeout).Should(BeEquivalentTo(oldPrimary))
})
@@ -384,10 +394,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
func() {
timeout := 300
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary))
oldPrimary := cluster.Status.CurrentPrimary
@@ -402,12 +412,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
// Check that the new parameter has been modified in every pod
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show max_connections")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n"))
return value, err, atoiErr
@@ -418,7 +429,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
By("verify that a switchover not happened", func() {
// Check that a switchover did not happen
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimary, err
}, timeout).Should(BeEquivalentTo(oldPrimary))
})
@@ -427,18 +438,19 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
// pg_ident_file_mappings is available from v15 only
It("09. reloading Pg when pg_ident rules are modified", func() {
if env.PostgresVersion > 14 {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
query := "select count(1) from pg_ident_file_mappings;"
By("check that there is only one entry in pg_ident_file_mappings", func() {
Eventually(func() (string, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
query)
return strings.Trim(stdout, "\n"), err
}, timeout).Should(BeEquivalentTo("1"))
@@ -452,12 +464,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
By("verify that there are now two entries in pg_ident_file_mappings", func() {
Eventually(func() (string, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
query)
return strings.Trim(stdout, "\n"), err
}, timeout).Should(BeEquivalentTo("2"))
@@ -484,10 +497,10 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
const namespacePrefix = "config-change-primary-update-restart"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(clusterFileWithPrimaryUpdateRestart)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterFileWithPrimaryUpdateRestart)
Expect(err).ToNot(HaveOccurred())
By("setting up cluster with primaryUpdateMethod value set to restart", func() {
@@ -504,16 +517,19 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
var primaryStartTime time.Time
By("getting old primary info", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
oldPrimaryPodName = primaryPodInfo.GetName()
- forward, conn, err := utils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- utils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
Expect(err).ToNot(HaveOccurred())
@@ -544,7 +560,7 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
})
By(fmt.Sprintf("updating max_connection value to %v", newMaxConnectionsValue), func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -555,17 +571,18 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
})
By("verifying the new value for max_connections is updated for all instances", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show max_connections")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n"))
return value, err, atoiErr
@@ -576,7 +593,7 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
By("verifying the old primary is still the primary", func() {
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimary, err
}, 60).Should(BeEquivalentTo(oldPrimaryPodName))
})
@@ -591,11 +608,12 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
// take pg postmaster start time
query := "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');"
- stdout, _, cmdErr := env.EventuallyExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, cmdErr := exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
- }, utils.PostgresDBName,
+ }, postgres.PostgresDBName,
query,
RetryTimeout,
PollingTime,
@@ -614,7 +632,7 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
const expectedNewValueForWorkMem = "10MB"
By("updating work mem ", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -624,18 +642,19 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
})
By("verify that work_mem result as expected", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Check that the parameter has been modified in every pod
for _, pod := range podList.Items {
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- utils.PostgresDBName,
+ postgres.PostgresDBName,
"show work_mem")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n"))
return value, err, atoiErr
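Most hunks in configuration_update_test.go swap env.ExecQueryInInstancePod for exec.QueryInInstancePod, which threads the context, the client, the clientset, and the REST config through every call. A sketch of the recurring "read a GUC from an instance pod" idiom under that signature; showSetting is a hypothetical wrapper name:

    package example

    import (
        "strings"

        corev1 "k8s.io/api/core/v1"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
    )

    // showSetting runs `show <setting>` in the given instance pod using the
    // new argument order: context, client, clientset, REST config, locator,
    // database name, query.
    func showSetting(
        env *environment.TestingEnvironment,
        pod corev1.Pod,
        setting string,
    ) (string, error) {
        stdout, _, err := exec.QueryInInstancePod(
            env.Ctx, env.Client, env.Interface, env.RestClientConfig,
            exec.PodLocator{
                Namespace: pod.Namespace,
                PodName:   pod.Name,
            },
            postgres.PostgresDBName,
            "show "+setting)
        return strings.Trim(stdout, "\n"), err
    }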
diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go
index fd962159f0..27b770922b 100644
--- a/tests/e2e/connection_test.go
+++ b/tests/e2e/connection_test.go
@@ -23,7 +23,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -51,7 +52,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity
appDBUser string,
appPassword string,
superuserPassword string,
- env *utils.TestingEnvironment,
+ env *environment.TestingEnvironment,
) {
// We test -rw, -ro and -r services with the app user and the superuser
rwService := fmt.Sprintf("%v-rw", clusterName)
@@ -59,8 +60,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity
roService := fmt.Sprintf("%v-ro", clusterName)
services := []string{rwService, roService, rService}
for _, service := range services {
- AssertConnection(namespace, service, appDBName, utils.PostgresDBName, superuserPassword, env)
- AssertConnection(namespace, service, appDBName, appDBUser, appPassword, env)
+ AssertConnection(namespace, service, appDBName, postgres.PostgresDBName, superuserPassword, env)
}
AssertWritesToReplicaFails(namespace, roService, appDBName, appDBUser, appPassword)
@@ -78,7 +78,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity
It("can connect with auto-generated passwords", func() {
// Create a cluster in a namespace we'll delete after the test
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -123,7 +123,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity
// Create a cluster in a namespace we'll delete after the test
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
AssertServices(namespace, clusterName, appDBName, appDBUser,
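The -rw/-ro/-r service names exercised above are derived from the cluster name by a fixed convention; a tiny sketch of that mapping (the role notes in the comments describe standard CloudNativePG service semantics):

    package example

    // clusterServices returns the three service names CloudNativePG creates
    // for a cluster, matching the rwService/roService/rService variables in
    // the test above.
    func clusterServices(clusterName string) []string {
        return []string{
            clusterName + "-rw", // read-write: always points at the primary
            clusterName + "-ro", // read-only: replicas only
            clusterName + "-r",  // read: any instance of the cluster
        }
    }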
diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go
index 594bba356c..5cf4c6b010 100644
--- a/tests/e2e/declarative_database_management_test.go
+++ b/tests/e2e/declarative_database_management_test.go
@@ -24,7 +24,12 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -58,10 +63,10 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
By("setting up cluster and declarative database CRD", func() {
@@ -74,8 +79,9 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
"and encoding = pg_char_to_encoding('%s') and datctype = '%s' and datcollate = '%s'",
db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate)
Eventually(func(g Gomega) {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod,
},
@@ -96,7 +102,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
)
By("applying Database CRD manifest", func() {
CreateResourceFromFile(namespace, databaseManifest)
- databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest)
+ databaseObjectName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest)
Expect(err).NotTo(HaveOccurred())
})
By("ensuring the Database CRD succeeded reconciliation", func() {
@@ -116,24 +122,24 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
})
By("verifying new database has been created with the expected fields", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName,
databaseExistsQuery(dbname), "t"), 30).Should(Succeed())
assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database)
})
By("removing the Database object", func() {
- Expect(testsUtils.DeleteObject(env, &database)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, &database)).To(Succeed())
})
By("verifying the retention policy in the postgres database", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName,
databaseExistsQuery(dbname), boolPGOutput(retainOnDeletion)), 30).Should(Succeed())
})
}
@@ -166,10 +172,10 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
)
It("will not prevent the deletion of the namespace with lagging finalizers", func() {
By("setting up the new namespace and cluster", func() {
- err = env.CreateNamespace(namespace)
+ err = namespaces.CreateNamespace(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
@@ -177,7 +183,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
By("creating the database", func() {
databaseManifest := fixturesDir +
"/declarative_databases/database-with-delete-reclaim-policy.yaml.template"
- databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest)
+ databaseObjectName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest)
Expect(err).NotTo(HaveOccurred())
CreateResourceFromFile(namespace, databaseManifest)
})
@@ -195,7 +201,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test
}, 300).WithPolling(10 * time.Second).Should(Succeed())
})
By("deleting the namespace and making sure it succeeds before timeout", func() {
- err := env.DeleteNamespaceAndWait(namespace, 120)
+ err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, namespace, 120)
Expect(err).ToNot(HaveOccurred())
})
})
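Besides the query helpers, this file moves namespace lifecycle handling to the namespaces package. A minimal sketch pairing the create and delete-and-wait calls shown above; withNamespace is a hypothetical wrapper, and the 120-second timeout mirrors the spec:

    package example

    import (
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
    )

    // withNamespace creates a namespace, runs the test body, then deletes
    // the namespace and waits (up to 120 seconds, as in the spec above) for
    // the deletion to complete.
    func withNamespace(env *environment.TestingEnvironment, name string, body func()) error {
        if err := namespaces.CreateNamespace(env.Ctx, env.Client, name); err != nil {
            return err
        }
        body()
        return namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, name, 120)
    }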
diff --git a/tests/e2e/declarative_hibernation_test.go b/tests/e2e/declarative_hibernation_test.go
index 7f30bf1070..c08333f120 100644
--- a/tests/e2e/declarative_hibernation_test.go
+++ b/tests/e2e/declarative_hibernation_test.go
@@ -24,7 +24,9 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -47,10 +49,10 @@ var _ = Describe("Cluster declarative hibernation", func() {
It("hibernates an existing cluster", func(ctx SpecContext) {
const namespacePrefix = "declarative-hibernation"
- clusterName, err := env.GetResourceNameFromYAML(sampleFileCluster)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileCluster)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("creating a new cluster", func() {
@@ -66,7 +68,7 @@ var _ = Describe("Cluster declarative hibernation", func() {
})
By("hibernating the new cluster", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
if cluster.Annotations == nil {
cluster.Annotations = make(map[string]string)
@@ -79,19 +81,20 @@ var _ = Describe("Cluster declarative hibernation", func() {
By("waiting for the cluster to be hibernated correctly", func() {
Eventually(func(g Gomega) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).NotTo(HaveOccurred())
- g.Expect(meta.IsStatusConditionTrue(cluster.Status.Conditions, hibernation.HibernationConditionType)).To(BeTrue())
+ g.Expect(meta.IsStatusConditionTrue(cluster.Status.Conditions,
+ hibernation.HibernationConditionType)).To(BeTrue())
}, 300).Should(Succeed())
})
By("verifying that the Pods have been deleted for the cluster", func() {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(len(podList.Items)).Should(BeEquivalentTo(0))
})
By("rehydrating the cluster", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
if cluster.Annotations == nil {
cluster.Annotations = make(map[string]string)
@@ -105,7 +108,7 @@ var _ = Describe("Cluster declarative hibernation", func() {
By("waiting for the condition to be removed", func() {
Eventually(func(g Gomega) {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
condition := meta.FindStatusCondition(cluster.Status.Conditions, hibernation.HibernationConditionType)
@@ -115,7 +118,7 @@ var _ = Describe("Cluster declarative hibernation", func() {
By("waiting for the Pods to be recreated", func() {
Eventually(func(g Gomega) {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(len(podList.Items)).Should(BeEquivalentTo(cluster.Spec.Instances))
}, 300).Should(Succeed())
})
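The hibernate and rehydrate steps both follow the same shape: guard against a nil annotation map, set an annotation on the Cluster, and push the update. A sketch of that toggle, assuming the declarative hibernation key is "cnpg.io/hibernation" (the test reads the constant from pkg/utils, which this excerpt does not show):

    package example

    import (
        "context"

        "sigs.k8s.io/controller-runtime/pkg/client"

        apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    )

    // setHibernation flips the hibernation annotation and updates the
    // Cluster, mirroring the nil-map guard used in the test.
    func setHibernation(ctx context.Context, c client.Client, cluster *apiv1.Cluster, on bool) error {
        if cluster.Annotations == nil {
            cluster.Annotations = make(map[string]string)
        }
        value := "off"
        if on {
            value = "on"
        }
        cluster.Annotations["cnpg.io/hibernation"] = value // assumed key, see note above
        return c.Update(ctx, cluster)
    }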
diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go
index d838034d9d..c616dd8950 100644
--- a/tests/e2e/disk_space_test.go
+++ b/tests/e2e/disk_space_test.go
@@ -28,7 +28,11 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -48,19 +52,20 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() {
var primaryPod *corev1.Pod
By("finding cluster resources", func() {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster).ToNot(BeNil())
- primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(primaryPod).ToNot(BeNil())
})
By("filling the WAL volume", func() {
timeout := time.Minute * 5
- _, _, err := env.ExecCommandInInstancePod(
- testsUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
},
@@ -73,35 +78,37 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() {
By("writing something when no space is available", func() {
// Create the table used by the scenario
query := "CREATE TABLE diskspace AS SELECT generate_series(1, 1000000);"
- _, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.AppDBName,
+ postgres.AppDBName,
query)
Expect(err).To(HaveOccurred())
query = "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT"
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
Expect(err).To(HaveOccurred())
})
By("waiting for the primary to become not ready", func() {
Eventually(func(g Gomega) bool {
- primaryPod, err := env.GetPod(namespace, primaryPod.Name)
+ primaryPod, err := pods.Get(env.Ctx, env.Client, namespace, primaryPod.Name)
g.Expect(err).ToNot(HaveOccurred())
- return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse)
+ return pods.HasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse)
}).WithTimeout(time.Minute).Should(BeTrue())
})
By("checking if the operator detects the issue", func() {
Eventually(func(g Gomega) string {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
return cluster.Status.Phase
}).WithTimeout(time.Minute).Should(Equal("Not enough disk space"))
@@ -114,11 +121,11 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() {
primaryWALPVC := &corev1.PersistentVolumeClaim{}
By("finding cluster resources", func() {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster).ToNot(BeNil())
- primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(primaryPod).ToNot(BeNil())
@@ -159,19 +166,20 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() {
// We can't delete the Pod, as this will trigger
// a failover.
Eventually(func(g Gomega) bool {
- primaryPod, err := env.GetPod(namespace, primaryPod.Name)
+ primaryPod, err := pods.Get(env.Ctx, env.Client, namespace, primaryPod.Name)
g.Expect(err).ToNot(HaveOccurred())
- return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue)
+ return pods.HasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue)
}).WithTimeout(10 * time.Minute).Should(BeTrue())
})
By("writing some WAL", func() {
query := "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT"
- _, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
Expect(err).NotTo(HaveOccurred())
})
@@ -191,10 +199,10 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() {
func(sampleFile string) {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
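The readiness polls above pair pods.Get with pods.HasCondition in place of the former env.GetPod and testsUtils.PodHasCondition. A compact sketch of the poll predicate; podNotReady is a hypothetical wrapper:

    package example

    import (
        corev1 "k8s.io/api/core/v1"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
    )

    // podNotReady reports whether the named pod currently has the Ready
    // condition set to False, as polled in the WAL-volume-full scenario.
    func podNotReady(env *environment.TestingEnvironment, namespace, name string) (bool, error) {
        pod, err := pods.Get(env.Ctx, env.Client, namespace, name)
        if err != nil {
            return false, err
        }
        return pods.HasCondition(pod, corev1.PodReady, corev1.ConditionFalse), nil
    }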
diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go
index d3ac7f8907..5b2055cdd1 100644
--- a/tests/e2e/drain_node_test.go
+++ b/tests/e2e/drain_node_test.go
@@ -26,8 +26,12 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -43,7 +47,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
if testLevelEnv.Depth < int(level) {
Skip("Test depth is lower than the amount requested for this test")
}
- nodes, _ := env.GetNodeList()
+ nodes, _ := nodes.List(env.Ctx, env.Client)
// We label three nodes where we could run the workloads, and ignore
// the others. The pods of the clusters created in this test run only
// where the drain label exists.
@@ -51,7 +55,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) {
nodesWithLabels = append(nodesWithLabels, node.Name)
cmd := fmt.Sprintf("kubectl label node %v drain=drain --overwrite", node.Name)
- _, stderr, err := testsUtils.Run(cmd)
+ _, stderr, err := run.Run(cmd)
Expect(stderr).To(BeEmpty())
Expect(err).ToNot(HaveOccurred())
}
@@ -66,11 +70,11 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
AfterEach(func() {
// Uncordon the cordoned nodes and remove the labels we added in the
// BeforeEach section
- err := nodes.UncordonAllNodes(env)
+ err := nodes.UncordonAll(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
for _, node := range nodesWithLabels {
cmd := fmt.Sprintf("kubectl label node %v drain- ", node)
- _, _, err := testsUtils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
}
nodesWithLabels = nil
@@ -97,13 +101,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// mark a node unschedulable so the pods will be distributed only on two nodes
for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-2] {
cmd := fmt.Sprintf("kubectl cordon %v", cordonNode)
- _, _, err := testsUtils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
}
})
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -111,7 +115,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// Wait for jobs to be removed
timeout := 180
Eventually(func() (int, error) {
- podList, err := env.GetPodList(namespace)
+ podList, err := pods.List(env.Ctx, env.Client, namespace)
+ if err != nil {
+ return 0, err
+ }
return len(podList.Items), err
}, timeout).Should(BeEquivalentTo(3))
})
@@ -121,7 +128,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "test",
}
AssertCreateTestData(env, tableLocator)
@@ -130,7 +137,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// their volumes. We do not expect the UIDs to change.
// We take advantage of the fact that related PVCs and Pods have
// the same name.
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
pvcUIDMap := make(map[string]types.UID)
for _, pod := range podList.Items {
@@ -145,20 +152,26 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
}
// Drain the node containing the primary pod and store the list of running pods
- podsOnPrimaryNode := nodes.DrainPrimaryNode(namespace, clusterName,
- testTimeouts[testsUtils.DrainNode], env)
+ podsOnPrimaryNode := nodes.DrainPrimary(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ testTimeouts[testsUtils.DrainNode],
+ )
By("verifying failover after drain", func() {
timeout := 180
// Expect a failover to have happened
Eventually(func() (string, error) {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return pod.Name, err
}, timeout).ShouldNot(BeEquivalentTo(oldPrimary))
})
By("uncordon nodes and check new pods use old pvcs", func() {
- err := nodes.UncordonAllNodes(env)
+ err := nodes.UncordonAll(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
// Ensure evicted pods have restarted and are running.
// One of them could have become the new primary.
@@ -212,13 +225,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-1] {
cordonNodes = append(cordonNodes, cordonNode)
cmd := fmt.Sprintf("kubectl cordon %v", cordonNode)
- _, _, err := testsUtils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
}
})
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -226,7 +239,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// Wait for jobs to be removed
timeout := 180
Eventually(func() (int, error) {
- podList, err := env.GetPodList(namespace)
+ podList, err := pods.List(env.Ctx, env.Client, namespace)
+ if err != nil {
+ return 0, err
+ }
return len(podList.Items), err
}, timeout).Should(BeEquivalentTo(3))
})
@@ -236,7 +252,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "test",
}
AssertCreateTestData(env, tableLocator)
@@ -245,7 +261,9 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// their volumes. We do not expect the UIDs to change.
// We take advantage of the fact that related PVCs and Pods have
// the same name.
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+
pvcUIDMap := make(map[string]types.UID)
for _, pod := range podList.Items {
pvcNamespacedName := types.NamespacedName{
@@ -262,19 +280,25 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// to move to.
By(fmt.Sprintf("uncordon one more node '%v'", cordonNodes[0]), func() {
cmd := fmt.Sprintf("kubectl uncordon %v", cordonNodes[0])
- _, _, err = testsUtils.Run(cmd)
+ _, _, err = run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
})
// Drain the node containing the primary pod and store the list of running pods
- podsOnPrimaryNode := nodes.DrainPrimaryNode(namespace, clusterName,
- testTimeouts[testsUtils.DrainNode], env)
+ podsOnPrimaryNode := nodes.DrainPrimary(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ testTimeouts[testsUtils.DrainNode],
+ )
By("verifying failover after drain", func() {
timeout := 180
// Expect a failover to have happened
Eventually(func() (string, error) {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return pod.Name, err
}, timeout).ShouldNot(BeEquivalentTo(oldPrimary))
})
@@ -334,13 +358,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
By("leaving a single uncordoned", func() {
for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-1] {
cmd := fmt.Sprintf("kubectl cordon %v", cordonNode)
- _, _, err := testsUtils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
}
})
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -349,7 +373,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// Wait for jobs to be removed
timeout := 180
Eventually(func() (int, error) {
- podList, err := env.GetPodList(namespace)
+ podList, err := pods.List(env.Ctx, env.Client, namespace)
+ if err != nil {
+ return 0, err
+ }
return len(podList.Items), err
}, timeout).Should(BeEquivalentTo(3))
})
@@ -358,7 +385,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// not exist anymore after the drain
var podsBeforeDrain []string
By("retrieving the current pods' names", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
podsBeforeDrain = append(podsBeforeDrain, pod.Name)
@@ -369,7 +396,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "test",
}
AssertCreateTestData(env, tableLocator)
@@ -377,13 +404,16 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// We uncordon a cordoned node. New pods can go there.
By("uncordon node for pod failover", func() {
cmd := fmt.Sprintf("kubectl uncordon %v", nodesWithLabels[0])
- _, _, err := testsUtils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
})
// Drain the node containing the primary pod. Pods should be moved
// to the node we've just uncordoned
- nodes.DrainPrimaryNode(namespace, clusterName, testTimeouts[testsUtils.DrainNode], env)
+ nodes.DrainPrimary(
+ env.Ctx, env.Client,
+ namespace, clusterName, testTimeouts[testsUtils.DrainNode],
+ )
// Expect pods to be recreated and to be ready
AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
@@ -393,7 +423,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
timeout := 600
Eventually(func(g Gomega) {
matchingNames := 0
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
// compare the old pod list with the current pod names
@@ -410,7 +440,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
AssertDataExpectedCount(env, tableLocator, 2)
AssertClusterStandbysAreStreaming(namespace, clusterName, 140)
- err = nodes.UncordonAllNodes(env)
+ err = nodes.UncordonAll(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
})
})
@@ -424,7 +454,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
BeforeAll(func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
})
@@ -436,7 +466,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
// Wait for jobs to be removed
timeout := 180
Eventually(func() (int, error) {
- podList, err := env.GetPodList(namespace)
+ podList, err := pods.List(env.Ctx, env.Client, namespace)
+ if err != nil {
+ return 0, err
+ }
return len(podList.Items), err
}, timeout).Should(BeEquivalentTo(1))
})
@@ -445,20 +478,23 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "test",
}
AssertCreateTestData(env, tableLocator)
// Drain the node containing the primary pod and store the list of running pods
- _ = nodes.DrainPrimaryNode(namespace, clusterName,
- testTimeouts[testsUtils.DrainNode], env)
+ _ = nodes.DrainPrimary(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ testTimeouts[testsUtils.DrainNode],
+ )
By("verifying the primary is now pending", func() {
timeout := 180
// Expect a failover to have happened
Eventually(func() (string, error) {
- pod, err := env.GetPod(namespace, clusterName+"-1")
+ pod, err := pods.Get(env.Ctx, env.Client, namespace, clusterName+"-1")
if err != nil {
return "", err
}
@@ -467,7 +503,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
})
By("uncordoning all nodes", func() {
- err := nodes.UncordonAllNodes(env)
+ err := nodes.UncordonAll(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
})
@@ -479,7 +515,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
When("the PDB is enabled", func() {
It("prevents the primary node from being drained", func() {
By("enabling PDB", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -491,7 +527,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
By("having the draining of the primary node rejected", func() {
var primaryNode string
Eventually(func(g Gomega) {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
primaryNode = pod.Spec.NodeName
}, 60).Should(Succeed())
@@ -501,14 +537,14 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
cmd := fmt.Sprintf(
"kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds",
primaryNode, 60)
- _, stderr, err := testsUtils.RunUnchecked(cmd)
+ _, stderr, err := run.Unchecked(cmd)
g.Expect(err).To(HaveOccurred())
g.Expect(stderr).To(ContainSubstring("Cannot evict pod as it would violate the pod's disruption budget"))
}, 60).Should(Succeed())
})
By("uncordoning all nodes", func() {
- err := nodes.UncordonAllNodes(env)
+ err := nodes.UncordonAll(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
})
})
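The drain specs converge on one cycle: drain the node hosting the primary, assert on the fallout, then uncordon everything so the next spec starts from a schedulable cluster. A sketch with the signatures introduced above; drainPrimaryAndRestore is illustrative, and the seconds unit for the timeout is an assumption taken from how testTimeouts is used:

    package example

    import (
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes"
    )

    // drainPrimaryAndRestore drains the node hosting the cluster's primary
    // and then uncordons every node again.
    func drainPrimaryAndRestore(
        env *environment.TestingEnvironment,
        namespace, clusterName string,
        timeoutSeconds int, // assumed unit, taken from testTimeouts above
    ) error {
        // DrainPrimary returns the pods that ran on the drained node; the
        // specs above capture them to verify PVC reuse after failover.
        _ = nodes.DrainPrimary(env.Ctx, env.Client, namespace, clusterName, timeoutSeconds)
        return nodes.UncordonAll(env.Ctx, env.Client)
    }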
diff --git a/tests/e2e/eviction_test.go b/tests/e2e/eviction_test.go
index 077d3a1f56..121f4f14fd 100644
--- a/tests/e2e/eviction_test.go
+++ b/tests/e2e/eviction_test.go
@@ -28,7 +28,11 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -59,7 +63,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
multiInstanceSampleFile = fixturesDir + "/eviction/multi-instance-cluster.yaml.template"
)
- evictPod := func(podName string, namespace string, env *testsUtils.TestingEnvironment, timeoutSeconds uint) error {
+ evictPod := func(podName string, namespace string, env *environment.TestingEnvironment, timeoutSeconds uint) error {
var pod corev1.Pod
err := env.Client.Get(env.Ctx,
ctrlclient.ObjectKey{Namespace: namespace, Name: podName},
@@ -119,18 +123,18 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
}
const namespacePrefix = "single-instance-pod-eviction"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("creating a cluster", func() {
// Create a cluster in a namespace we'll delete after the test
- clusterName, err := env.GetResourceNameFromYAML(singleInstanceSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, singleInstanceSampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, singleInstanceSampleFile, env)
})
})
It("evicts the primary pod in single instance cluster", func() {
- clusterName, err := env.GetResourceNameFromYAML(singleInstanceSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, singleInstanceSampleFile)
Expect(err).ToNot(HaveOccurred())
podName := clusterName + "-1"
err = evictPod(podName, namespace, env, 60)
@@ -152,7 +156,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
})
By("checking the cluster is healthy", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
})
})
})
@@ -176,20 +180,20 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
BeforeAll(func() {
const namespacePrefix = "multi-instance-pod-eviction"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("Creating a cluster with multiple instances", func() {
// Create a cluster, shared across the spec containers, in a namespace we'll delete after the test
- clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, multiInstanceSampleFile, env)
})
By("retrieving the nodeName for primary pod", func() {
var primaryPod *corev1.Pod
- clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile)
Expect(err).ToNot(HaveOccurred())
- primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
taintNodeName = primaryPod.Spec.NodeName
})
@@ -197,8 +201,9 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
AfterAll(func() {
if needRemoveTaint {
By("cleaning the taint on node", func() {
- cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", taintNodeName)
- _, _, err := testsUtils.Run(cmd)
+ cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-",
+ taintNodeName)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
})
}
@@ -207,12 +212,12 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
It("evicts the replica pod in multiple instance cluster", func() {
var podName string
- clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile)
Expect(err).ToNot(HaveOccurred())
// Find the standby pod
By("getting standby pod to evict", func() {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(len(podList.Items)).To(BeEquivalentTo(3))
for _, pod := range podList.Items {
// Avoid picking non-ready nodes, non-active nodes, or primary nodes
@@ -243,16 +248,16 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
})
By("checking the cluster is healthy", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
})
})
It("evicts the primary pod in multiple instance cluster", func() {
var primaryPod *corev1.Pod
- clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile)
Expect(err).ToNot(HaveOccurred())
- primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// We can not use patch to simulate the eviction of a primary pod;
@@ -260,21 +265,21 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
By("taint the node to simulate pod been evicted", func() {
cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute", taintNodeName)
- _, _, err = testsUtils.Run(cmd)
+ _, _, err = run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
needRemoveTaint = true
time.Sleep(3 * time.Second)
cmd = fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", taintNodeName)
- _, _, err = testsUtils.Run(cmd)
+ _, _, err = run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
needRemoveTaint = false
})
By("checking switchover happens", func() {
Eventually(func() bool {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, p := range podList.Items {
if specs.IsPodPrimary(p) && primaryPod.GetName() != p.GetName() {
@@ -287,7 +292,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() {
// The pod needs to rejoin, which takes more time
By("checking the cluster is healthy", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
})
})
})
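The mechanical pattern applied throughout these e2e files is the same: helpers that used to be methods on the shared testing environment become package-level functions taking the context and client explicitly, grouped into focused packages (clusterutils, yaml, timeouts, run). A minimal sketch of the new call shape, assuming the clusterutils.GetPrimary signature exactly as it appears in the hunks above; the primaryName wrapper is hypothetical:

    package e2e

    import (
        "context"

        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
    )

    // primaryName resolves the current primary pod name with the relocated
    // helper; callers now pass env.Ctx and env.Client at each call site.
    func primaryName(ctx context.Context, c ctrlclient.Client, namespace, clusterName string) (string, error) {
        pod, err := clusterutils.GetPrimary(ctx, c, namespace, clusterName)
        if err != nil {
            return "", err
        }
        return pod.GetName(), nil
    }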
diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go
index a59695bad6..6940c4b9fe 100644
--- a/tests/e2e/failover_test.go
+++ b/tests/e2e/failover_test.go
@@ -27,7 +27,12 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -52,13 +57,14 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// We check that the currentPrimary is the -1 instance as expected,
// and we define the targetPrimary (-3) and pausedReplica (-2).
By("checking that CurrentPrimary and TargetPrimary are equal", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(cluster.Status.CurrentPrimary, err).To(
BeEquivalentTo(cluster.Status.TargetPrimary))
currentPrimary = cluster.Status.CurrentPrimary
// Gather pod names
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
+ Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items), err).To(BeEquivalentTo(3))
for _, p := range podList.Items {
pods = append(pods, p.Name)
@@ -72,18 +78,19 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// In this way we know that this standby will lag behind when
// we do some work on the primary.
By("pausing the walreceiver on the 2nd node of the Cluster", func() {
- primaryPod, err := env.GetPod(namespace, currentPrimary)
+ primaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, currentPrimary)
Expect(err).ToNot(HaveOccurred())
- pausedPod, err := env.GetPod(namespace, pausedReplica)
+ pausedPod, err := podutils.Get(env.Ctx, env.Client, namespace, pausedReplica)
Expect(err).ToNot(HaveOccurred())
// Get the walreceiver pid
query := "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'"
- out, _, err := env.EventuallyExecQueryInInstancePod(
- utils.PodLocator{
+ out, _, err := exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pausedPod.Namespace,
PodName: pausedPod.Name,
- }, utils.PostgresDBName,
+ }, postgres.PostgresDBName,
query,
RetryTimeout,
PollingTime,
@@ -100,11 +107,12 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// We don't want to wait for the replication timeout.
query = fmt.Sprintf("SELECT pg_terminate_backend(pid) FROM pg_stat_replication "+
"WHERE application_name = '%v'", pausedReplica)
- _, _, err = env.EventuallyExecQueryInInstancePod(
- utils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
- }, utils.PostgresDBName,
+ }, postgres.PostgresDBName,
query,
RetryTimeout,
PollingTime,
@@ -113,24 +121,27 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// Expect the primary to have lost connection with the stopped standby
Eventually(func() (int, error) {
- primaryPod, err = env.GetPod(namespace, currentPrimary)
+ primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary)
Expect(err).ToNot(HaveOccurred())
- return utils.CountReplicas(env, primaryPod)
+ return postgres.CountReplicas(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ primaryPod, RetryTimeout)
}, RetryTimeout).Should(BeEquivalentTo(1))
})
// Perform a CHECKPOINT on the primary and wait for the working standby
// to catch up with it
By("generating some WAL traffic in the Cluster", func() {
- primaryPod, err := env.GetPod(namespace, currentPrimary)
+ primaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, currentPrimary)
Expect(err).ToNot(HaveOccurred())
// Gather the current WAL LSN
- initialLSN, _, err := env.EventuallyExecQueryInInstancePod(
- utils.PodLocator{
+ initialLSN, _, err := exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
- }, utils.PostgresDBName,
+ }, postgres.PostgresDBName,
"SELECT pg_current_wal_lsn()",
RetryTimeout,
PollingTime,
@@ -138,11 +149,12 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
Expect(err).ToNot(HaveOccurred())
// Execute a checkpoint
- _, _, err = env.EventuallyExecQueryInInstancePod(
- utils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
- }, utils.PostgresDBName,
+ }, postgres.PostgresDBName,
"CHECKPOINT",
RetryTimeout,
PollingTime,
@@ -155,13 +167,14 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// The replay_lsn of the targetPrimary should be ahead
// of the one before the checkpoint
Eventually(func() (string, error) {
- primaryPod, err = env.GetPod(namespace, currentPrimary)
+ primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary)
Expect(err).ToNot(HaveOccurred())
- out, _, err := env.EventuallyExecQueryInInstancePod(
- utils.PodLocator{
+ out, _, err := exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
- }, utils.PostgresDBName,
+ }, postgres.PostgresDBName,
query,
RetryTimeout,
PollingTime,
@@ -177,18 +190,18 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- err := env.DeletePod(namespace, currentPrimary, quickDelete)
+ err := podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete)
Expect(err).ToNot(HaveOccurred())
// We wait until the operator knows that the primary is dead.
// At this point the promotion is waiting for all the walreceivers
// to be disconnected. We can send the SIGCONT now.
Eventually(func() (int, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.ReadyInstances, err
}, RetryTimeout).Should(BeEquivalentTo(2))
- pausedPod, err := env.GetPod(namespace, pausedReplica)
+ pausedPod, err := podutils.Get(env.Ctx, env.Client, namespace, pausedReplica)
Expect(err).ToNot(HaveOccurred())
// Send the SIGCONT to the walreceiver PID to resume execution
@@ -200,7 +213,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
By("making sure that the operator is enforcing the switchover delay")
timeout := 120
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimaryFailingSinceTimestamp, err
}, timeout).Should(Not(Equal("")))
}
@@ -209,13 +222,13 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// The operator should eventually set the cluster target primary to
// the instance we expect to take that role (-3).
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.TargetPrimary, err
- }, testTimeouts[utils.NewTargetOnFailover]).
+ }, testTimeouts[timeouts.NewTargetOnFailover]).
ShouldNot(
Or(BeEquivalentTo(currentPrimary),
BeEquivalentTo(apiv1.PendingFailoverMarker)))
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(cluster.Status.TargetPrimary, err).To(
BeEquivalentTo(targetPrimary))
})
@@ -224,9 +237,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
// operator to the target primary
By("waiting for the TargetPrimary to become CurrentPrimary", func() {
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
return cluster.Status.CurrentPrimary, err
- }, testTimeouts[utils.NewPrimaryAfterFailover]).Should(BeEquivalentTo(targetPrimary))
+ }, testTimeouts[timeouts.NewPrimaryAfterFailover]).Should(BeEquivalentTo(targetPrimary))
})
}
@@ -244,9 +257,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
)
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -260,10 +273,10 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() {
namespacePrefix = "failover-e2e-delay"
)
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
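The query helpers follow the same convention but also need the kubernetes.Interface and REST config that the pod exec requires. A minimal sketch under the argument order used above; the walreceiverPID wrapper is hypothetical, and the retry and polling parameters are assumed to be plain ints standing in for the suite's RetryTimeout and PollingTime constants:

    package e2e

    import (
        "context"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
    )

    // walreceiverPID looks up the walreceiver backend PID on one instance,
    // retrying the exec until it succeeds or the timeout expires.
    func walreceiverPID(
        ctx context.Context,
        c ctrlclient.Client,
        kubeInterface kubernetes.Interface,
        restConfig *rest.Config,
        namespace, podName string,
        retryTimeout, pollingTime int,
    ) (string, error) {
        out, _, err := exec.EventuallyExecQueryInInstancePod(
            ctx, c, kubeInterface, restConfig,
            exec.PodLocator{Namespace: namespace, PodName: podName},
            postgres.PostgresDBName,
            "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'",
            retryTimeout, pollingTime,
        )
        return out, err
    }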
diff --git a/tests/e2e/fastfailover_test.go b/tests/e2e/fastfailover_test.go
index debccca369..f49921ceb2 100644
--- a/tests/e2e/fastfailover_test.go
+++ b/tests/e2e/fastfailover_test.go
@@ -68,7 +68,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La
clusterName = "cluster-fast-failover"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertFastFailOver(namespace, sampleFileWithoutReplicationSlots, clusterName,
webTestFile, webTestJob, maxReattachTime, maxFailoverTime)
@@ -86,7 +86,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La
clusterName = "cluster-fast-failover"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertFastFailOver(namespace, sampleFileWithReplicationSlots,
clusterName, webTestFile, webTestJob, maxReattachTime, maxFailoverTime)
@@ -100,7 +100,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La
clusterName = "cluster-syncreplicas-fast-failover"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertFastFailOver(
namespace, sampleFileSyncReplicas, clusterName, webTestSyncReplicas, webTestJob, maxReattachTime, maxFailoverTime)
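In files like this one the only change is the namespace factory: CreateUniqueTestNamespace stays a method on the environment but now receives the context and client explicitly. A sketch of the updated call shape, assuming the suite-level env and the Gomega dot-import these tests already have:

    // setupTestNamespace wraps the updated CreateUniqueTestNamespace call;
    // env is the package-level TestingEnvironment initialized by the suite.
    func setupTestNamespace(prefix string) string {
        namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, prefix)
        Expect(err).ToNot(HaveOccurred())
        return namespace
    }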
diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go
index d7a45efd47..99c8017bb7 100644
--- a/tests/e2e/fastswitchover_test.go
+++ b/tests/e2e/fastswitchover_test.go
@@ -27,6 +27,11 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -60,7 +65,7 @@ var _ = Describe("Fast switchover", Serial, Label(tests.LabelPerformance, tests.
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "primary-switchover-time"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
assertFastSwitchover(namespace, sampleFileWithoutReplicationSlots, clusterName, webTestFile, webTestJob)
})
@@ -70,7 +75,7 @@ var _ = Describe("Fast switchover", Serial, Label(tests.LabelPerformance, tests.
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "primary-switchover-time-with-slots"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
assertFastSwitchover(namespace, sampleFileWithReplicationSlots, clusterName, webTestFile, webTestJob)
AssertClusterHAReplicationSlots(namespace, clusterName)
@@ -99,7 +104,7 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe
CreateResourceFromFile(namespace, sampleFile)
})
By("having a Cluster with three instances ready", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
// Node 1 should be the primary, so the -rw service should
// point there. We verify this.
@@ -135,7 +140,9 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe
", PRIMARY KEY (id)" +
")"
- _, err := utils.RunExecOverForward(env, namespace, clusterName, utils.AppDBName,
+ _, err := postgres.RunExecOverForward(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName, postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix, query)
Expect(err).ToNot(HaveOccurred())
})
@@ -146,10 +153,10 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe
// on the postgres primary. We make sure that the first
// records appear on the database before moving to the next
// step.
- _, _, err := utils.Run("kubectl create -n " + namespace +
+ _, _, err := run.Run("kubectl create -n " + namespace +
" -f " + webTestFile)
Expect(err).ToNot(HaveOccurred())
- _, _, err = utils.Run("kubectl create -n " + namespace +
+ _, _, err = run.Run("kubectl create -n " + namespace +
" -f " + webTestJob)
Expect(err).ToNot(HaveOccurred())
@@ -164,12 +171,13 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe
if err != nil {
return "", err
}
- out, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- utils.AppDBName,
+ postgres.AppDBName,
query)
return strings.TrimSpace(out), err
}, RetryTimeout).Should(BeEquivalentTo("t"))
@@ -178,7 +186,7 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe
By("setting the TargetPrimary to node2 to trigger a switchover", func() {
targetPrimary = clusterName + "-2"
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
cluster.Status.TargetPrimary = targetPrimary
return env.Client.Status().Update(env.Ctx, cluster)
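The status update that triggers the switchover keeps its conflict-retry loop; only the Cluster fetch changes package. The same logic, extracted into a standalone helper for clarity (setTargetPrimary is a hypothetical name):

    package e2e

    import (
        "context"

        "k8s.io/client-go/util/retry"
        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
    )

    // setTargetPrimary fetches the Cluster with the relocated helper, sets the
    // desired target primary, and updates the status subresource, retrying on
    // optimistic-concurrency conflicts as the test above does.
    func setTargetPrimary(ctx context.Context, c ctrlclient.Client, namespace, clusterName, target string) error {
        return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
            cluster, err := clusterutils.Get(ctx, c, namespace, clusterName)
            if err != nil {
                return err
            }
            cluster.Status.TargetPrimary = target
            return c.Status().Update(ctx, cluster)
        })
    }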
diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go
index a43e7a4191..acf8291104 100644
--- a/tests/e2e/fencing_test.go
+++ b/tests/e2e/fencing_test.go
@@ -29,7 +29,13 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -78,12 +84,13 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
if err != nil {
return 0, err
}
- out, _, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
if err != nil {
return 0, err
@@ -94,23 +101,23 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
}
checkPostgresConnection := func(podName, namespace string) {
- err := testUtils.GetObject(env, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, &pod)
+ err := objects.Get(env.Ctx, env.Client, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, &pod)
Expect(err).ToNot(HaveOccurred())
timeout := time.Second * 10
dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require",
- testUtils.PGLocalSocketDir, "postgres", "postgres", "")
+ postgres.PGLocalSocketDir, "postgres", "postgres", "")
stdOut, stdErr, err := utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, pod,
specs.PostgresContainerName, &timeout,
"psql", dsn, "-tAc", "SELECT 1")
Expect(err).To(HaveOccurred(), stdErr, stdOut)
}
- checkFencingAnnotationSet := func(fencingMethod testUtils.FencingMethod, content []string) {
- if fencingMethod != testUtils.UsingAnnotation {
+ checkFencingAnnotationSet := func(fencingMethod fencing.Method, content []string) {
+ if fencingMethod != fencing.UsingAnnotation {
return
}
By("checking the cluster has the expected annotation set", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
if len(content) == 0 {
Expect(cluster.Annotations).To(Or(Not(HaveKey(utils.FencedInstanceAnnotation)),
@@ -124,19 +131,20 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
})
}
- assertFencingPrimaryWorks := func(fencingMethod testUtils.FencingMethod) {
+ assertFencingPrimaryWorks := func(fencingMethod fencing.Method) {
It("can fence a primary instance", func() {
var beforeFencingPodName string
By("fencing the primary instance", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
beforeFencingPodName = primaryPod.GetName()
- Expect(testUtils.FencingOn(env, beforeFencingPodName,
+ Expect(fencing.On(env.Ctx, env.Client, beforeFencingPodName,
namespace, clusterName, fencingMethod)).Should(Succeed())
})
By("check the instance is not ready, but kept as primary instance", func() {
checkInstanceStatusReadyOrNot(beforeFencingPodName, namespace, false)
- currentPrimaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(beforeFencingPodName).To(Equal(currentPrimaryPodInfo.GetName()))
})
@@ -146,14 +154,15 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
checkPostgresConnection(beforeFencingPodName, namespace)
})
By("lift the fencing", func() {
- Expect(testUtils.FencingOff(env, beforeFencingPodName,
+ Expect(fencing.Off(env.Ctx, env.Client, beforeFencingPodName,
namespace, clusterName, fencingMethod)).ToNot(HaveOccurred())
})
By("the old primary becomes ready", func() {
checkInstanceStatusReadyOrNot(beforeFencingPodName, namespace, true)
})
By("the old primary should still be the primary instance", func() {
- currentPrimaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(beforeFencingPodName).Should(BeEquivalentTo(currentPrimaryPodInfo.GetName()))
})
@@ -163,12 +172,12 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
checkFencingAnnotationSet(fencingMethod, nil)
})
}
- assertFencingFollowerWorks := func(fencingMethod testUtils.FencingMethod) {
+ assertFencingFollowerWorks := func(fencingMethod fencing.Method) {
It("can fence a follower instance", func() {
var beforeFencingPodName string
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
By("fence a follower instance", func() {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(len(podList.Items)).To(BeEquivalentTo(3))
for _, pod := range podList.Items {
if specs.IsPodStandby(pod) {
@@ -177,7 +186,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
}
}
Expect(beforeFencingPodName).ToNot(BeEmpty())
- Expect(testUtils.FencingOn(env, beforeFencingPodName,
+ Expect(fencing.On(env.Ctx, env.Client, beforeFencingPodName,
namespace, clusterName, fencingMethod)).ToNot(HaveOccurred())
})
checkFencingAnnotationSet(fencingMethod, []string{beforeFencingPodName})
@@ -189,7 +198,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
checkPostgresConnection(beforeFencingPodName, namespace)
})
By("lift the fencing", func() {
- Expect(testUtils.FencingOff(env, beforeFencingPodName,
+ Expect(fencing.Off(env.Ctx, env.Client, beforeFencingPodName,
namespace, clusterName, fencingMethod)).ToNot(HaveOccurred())
})
By("the instance becomes ready", func() {
@@ -201,41 +210,43 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
checkFencingAnnotationSet(fencingMethod, nil)
})
}
- assertFencingClusterWorks := func(fencingMethod testUtils.FencingMethod) {
+ assertFencingClusterWorks := func(fencingMethod fencing.Method) {
It("can fence all the instances in a cluster", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
primaryPodName := primaryPod.GetName()
By("fence the whole cluster using \"(*)\"", func() {
- Expect(testUtils.FencingOn(env, "*", namespace, clusterName, fencingMethod)).ToNot(HaveOccurred())
+ Expect(fencing.On(env.Ctx, env.Client, "*", namespace, clusterName,
+ fencingMethod)).ToNot(HaveOccurred())
})
checkFencingAnnotationSet(fencingMethod, []string{"*"})
By("check all instances are not ready", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podList.Items {
checkInstanceStatusReadyOrNot(pod.GetName(), namespace, false)
}
})
By("check postgres connection on all instances", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podList.Items {
checkPostgresConnection(pod.GetName(), namespace)
}
})
By("lift the fencing", func() {
- Expect(testUtils.FencingOff(env, "*", namespace, clusterName, fencingMethod)).ToNot(HaveOccurred())
+ Expect(fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName,
+ fencingMethod)).ToNot(HaveOccurred())
})
By("all instances become ready", func() {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podList.Items {
checkInstanceStatusReadyOrNot(pod.GetName(), namespace, true)
}
})
By("the old primary is still the primary instance", func() {
- podName, err := env.GetClusterPrimary(namespace, clusterName)
+ podName, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(primaryPodName).Should(BeEquivalentTo(podName.GetName()))
})
@@ -250,33 +261,33 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() {
var err error
BeforeAll(func() {
const namespacePrefix = "fencing-using-plugin"
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
})
- assertFencingPrimaryWorks(testUtils.UsingPlugin)
- assertFencingFollowerWorks(testUtils.UsingPlugin)
- assertFencingClusterWorks(testUtils.UsingPlugin)
+ assertFencingPrimaryWorks(fencing.UsingPlugin)
+ assertFencingFollowerWorks(fencing.UsingPlugin)
+ assertFencingClusterWorks(fencing.UsingPlugin)
})
Context("using annotation", Ordered, func() {
var err error
BeforeAll(func() {
const namespacePrefix = "fencing-using-annotation"
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
})
- assertFencingPrimaryWorks(testUtils.UsingAnnotation)
- assertFencingFollowerWorks(testUtils.UsingAnnotation)
- assertFencingClusterWorks(testUtils.UsingAnnotation)
+ assertFencingPrimaryWorks(fencing.UsingAnnotation)
+ assertFencingFollowerWorks(fencing.UsingAnnotation)
+ assertFencingClusterWorks(fencing.UsingAnnotation)
})
})
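The fencing helpers move from testUtils.FencingOn/FencingOff with a testUtils.FencingMethod to fencing.On/fencing.Off with a fencing.Method; the argument order is otherwise unchanged. A compact sketch of a fence, check, unfence round trip under those signatures (withFencing is a hypothetical name):

    package e2e

    import (
        "context"

        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing"
    )

    // withFencing fences an instance (or "*" for the whole cluster), runs the
    // supplied checks while it is fenced, then lifts the fence again.
    func withFencing(
        ctx context.Context,
        c ctrlclient.Client,
        instance, namespace, clusterName string,
        method fencing.Method,
        checks func(),
    ) error {
        if err := fencing.On(ctx, c, instance, namespace, clusterName, method); err != nil {
            return err
        }
        checks()
        return fencing.Off(ctx, c, instance, namespace, clusterName, method)
    }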
diff --git a/tests/e2e/hibernation_test.go b/tests/e2e/hibernation_test.go
index ce4f38ea64..0b4df3876a 100644
--- a/tests/e2e/hibernation_test.go
+++ b/tests/e2e/hibernation_test.go
@@ -30,7 +30,13 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -67,7 +73,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
var clusterManifest []byte
var beforeHibernationCurrentPrimary string
By("collecting current primary details", func() {
- beforeHibernationClusterInfo, err = env.GetCluster(namespace, clusterName)
+ beforeHibernationClusterInfo, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
beforeHibernationCurrentPrimary = beforeHibernationClusterInfo.Status.CurrentPrimary
// collect the expected cluster manifest info
@@ -79,19 +85,19 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
getPvc := func(role persistentvolumeclaim.Meta, instanceName string) corev1.PersistentVolumeClaim {
pvcName := role.GetName(instanceName)
pvcInfo := corev1.PersistentVolumeClaim{}
- err = testsUtils.GetObject(env, ctrlclient.ObjectKey{Namespace: namespace, Name: pvcName}, &pvcInfo)
+ err = objects.Get(env.Ctx, env.Client, ctrlclient.ObjectKey{Namespace: namespace, Name: pvcName}, &pvcInfo)
Expect(err).ToNot(HaveOccurred())
return pvcInfo
}
performHibernation := func(mode mode, namespace, clusterName string) {
By(fmt.Sprintf("performing hibernation %v", mode), func() {
- _, _, err := testsUtils.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v",
+ _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v",
mode, clusterName, namespace))
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() {
Eventually(func(g Gomega) {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(len(podList.Items)).Should(BeEquivalentTo(0))
}, 300).Should(Succeed())
})
@@ -100,7 +106,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
getHibernationStatusInJSON := func(namespace, clusterName string) map[string]interface{} {
var data map[string]interface{}
By("getting hibernation status", func() {
- stdOut, _, err := testsUtils.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v -ojson",
+ stdOut, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v -ojson",
HibernateStatus, clusterName, namespace))
Expect(err).ToNot(HaveOccurred(), stdOut)
err = json.Unmarshal([]byte(stdOut), &data)
@@ -115,14 +121,16 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
Expect(strings.Contains(string(message), actualStatus)).Should(BeEquivalentTo(true),
actualStatus+"\\not-contained-in\\"+string(message))
}
- verifyClusterResources := func(namespace, clusterName string, objs []persistentvolumeclaim.ExpectedObjectCalculator) {
+ verifyClusterResources := func(
+ namespace, clusterName string, objs []persistentvolumeclaim.ExpectedObjectCalculator,
+ ) {
By(fmt.Sprintf("verifying cluster resources are removed "+
"post hibernation where roles %v", objs), func() {
timeout := 120
By(fmt.Sprintf("verifying cluster %v is removed", clusterName), func() {
Eventually(func() (bool, apiv1.Cluster) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return true, apiv1.Cluster{}
}
@@ -132,7 +140,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
By(fmt.Sprintf("verifying cluster %v PVCs are removed", clusterName), func() {
Eventually(func() (int, error) {
- pvcList, err := env.GetPVCList(namespace)
+ pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace)
if err != nil {
return -1, err
}
@@ -200,7 +208,8 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
})
})
}
- verifyPvc := func(expectedObject persistentvolumeclaim.ExpectedObjectCalculator, pvcUid types.UID,
+ verifyPvc := func(
+ expectedObject persistentvolumeclaim.ExpectedObjectCalculator, pvcUid types.UID,
clusterManifest []byte, instanceName string,
) {
pvcInfo := getPvc(expectedObject, instanceName)
@@ -212,12 +221,12 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
utils.PgControldataAnnotationName,
utils.ClusterManifestAnnotationName,
}
- testsUtils.ObjectHasAnnotations(&pvcInfo, expectedAnnotationKeyPresent)
+ storage.ObjectHasAnnotations(&pvcInfo, expectedAnnotationKeyPresent)
expectedAnnotation := map[string]string{
utils.HibernateClusterManifestAnnotationName: string(clusterManifest),
utils.ClusterManifestAnnotationName: string(clusterManifest),
}
- testsUtils.ObjectMatchesAnnotations(&pvcInfo, expectedAnnotation)
+ storage.ObjectMatchesAnnotations(&pvcInfo, expectedAnnotation)
}
assertHibernation := func(namespace, clusterName, tableName string) {
@@ -228,7 +237,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -293,7 +302,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage)
})
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
// Test data should be present after hibernation off
AssertDataExpectedCount(env, tableLocator, 2)
}
@@ -301,10 +310,10 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
When("cluster setup with PG-WAL volume", func() {
It("hibernation process should work", func() {
const namespacePrefix = "hibernation-on-with-pg-wal"
- clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithPGWalVolume)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithPGWalVolume)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env)
assertHibernation(namespace, clusterName, tableName)
@@ -315,17 +324,17 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
var beforeHibernationPgDataPvcUID types.UID
const namespacePrefix = "hibernation-without-pg-wal"
- clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithOutPGWalVolume)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithOutPGWalVolume)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileClusterWithOutPGWalVolume, env)
// Write a table and some data on the "app" database
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -373,7 +382,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage)
})
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
// Test data should be present after hibernation off
AssertDataExpectedCount(env, tableLocator, 2)
})
@@ -381,10 +390,10 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu
When("cluster hibernation after switchover", func() {
It("hibernation process should work", func() {
const namespacePrefix = "hibernation-with-switchover"
- clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithPGWalVolume)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithPGWalVolume)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env)
AssertSwitchover(namespace, clusterName, env)
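run.Run appears to keep the old utils.Run contract of returning stdout, stderr, and an error, so the kubectl cnpg plugin invocations only change their package qualifier. A one-function sketch of the hibernate call used above (the hibernate wrapper is hypothetical):

    package e2e

    import (
        "fmt"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
    )

    // hibernate invokes the cnpg plugin through the relocated run.Run helper,
    // discarding the captured stdout and stderr.
    func hibernate(mode, clusterName, namespace string) error {
        _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v",
            mode, clusterName, namespace))
        return err
    }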
diff --git a/tests/e2e/initdb_test.go b/tests/e2e/initdb_test.go
index d399e033af..a7922a946b 100644
--- a/tests/e2e/initdb_test.go
+++ b/tests/e2e/initdb_test.go
@@ -22,7 +22,8 @@ import (
"strings"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -47,19 +48,20 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f
namespace,
clusterName,
tableName string,
- dbName utils.DatabaseName,
+ dbName exec.DatabaseName,
expectedCount int,
) {
query := fmt.Sprintf("SELECT count(*) FROM %s", tableName)
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf(
"querying the %s table in the %s database defined by postInit SQL",
tableName, dbName), func() {
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary.Name,
}, dbName,
@@ -88,7 +90,7 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "initdb-postqueries"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
CreateResourceFromFile(namespace, postInitSQLSecretRef)
@@ -127,11 +129,12 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f
"app", 10000)
By("checking inside the database the default locale", func() {
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary.Name,
}, "postgres",
@@ -154,16 +157,17 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "initdb-locale"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, postInitSQLCluster, env)
By("checking inside the database", func() {
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- stdout, _, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary.Name,
}, "postgres",
diff --git a/tests/e2e/logs_test.go b/tests/e2e/logs_test.go
index 7bd3c44259..a8da797b3e 100644
--- a/tests/e2e/logs_test.go
+++ b/tests/e2e/logs_test.go
@@ -28,7 +28,9 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -50,23 +52,26 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
const sampleFile = fixturesDir + "/json_logs/cluster-json-logs.yaml.template"
var namespaceErr error
// Create a cluster in a namespace we'll delete after the test
- namespace, namespaceErr = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, namespaceErr = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(namespaceErr).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
By("verifying the presence of possible logger values", func() {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
for _, pod := range podList.Items {
// Gather pod logs in the form of a JSON array
- logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env)
+ logEntries, err := logs.ParseJSONLogs(
+ env.Ctx, env.Interface,
+ namespace, pod.GetName(),
+ )
Expect(err).NotTo(HaveOccurred(), "unable to parse json logs")
Expect(logEntries).ToNot(BeEmpty(), "no logs found")
// Logger field Assertions
- isPgControlDataLoggerFound := testsUtils.HasLogger(logEntries, "pg_controldata")
+ isPgControlDataLoggerFound := logs.HasLogger(logEntries, "pg_controldata")
Expect(isPgControlDataLoggerFound).To(BeTrue(),
fmt.Sprintf("pg_controldata logger not found in pod %v logs", pod.GetName()))
- isPostgresLoggerFound := testsUtils.HasLogger(logEntries, "postgres")
+ isPostgresLoggerFound := logs.HasLogger(logEntries, "postgres")
Expect(isPostgresLoggerFound).To(BeTrue(),
fmt.Sprintf("postgres logger not found in pod %v logs", pod.GetName()))
}
@@ -74,7 +79,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
By("verifying the format of error queries being logged", func() {
errorTestQuery := "selecct 1\nwith newlines\n"
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
timeout := 300
for _, pod := range podList.Items {
@@ -91,11 +96,14 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
// Eventually the error log line will be logged
Eventually(func(g Gomega) bool {
// Gather pod logs in the form of a JSON array
- logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env)
+ logEntries, err := logs.ParseJSONLogs(
+ env.Ctx, env.Interface,
+ namespace, pod.GetName(),
+ )
g.Expect(err).ToNot(HaveOccurred())
// Gather the record containing the wrong query result
- return testsUtils.AssertQueryRecord(
+ return logs.AssertQueryRecord(
logEntries,
errorTestQuery,
queryError.Error(),
@@ -107,7 +115,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
By("verifying only the primary instance logs write queries", func() {
errorTestQuery := "ccreate table test(var text)"
- primaryPod, _ := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, _ := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
timeout := 300
var queryError error
@@ -123,14 +131,17 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
// Expect the query to be eventually logged on the primary
Eventually(func() (bool, error) {
// Gather pod logs in the form of a JSON array
- logEntries, err := testsUtils.ParseJSONLogs(namespace, primaryPod.GetName(), env)
+ logEntries, err := logs.ParseJSONLogs(
+ env.Ctx, env.Interface,
+ namespace, primaryPod.GetName(),
+ )
if err != nil {
GinkgoWriter.Printf("Error reported while gathering primary pod log %s\n", err.Error())
return false, err
}
// Gather the record containing the wrong query result
- return testsUtils.AssertQueryRecord(logEntries, errorTestQuery, queryError.Error(),
+ return logs.AssertQueryRecord(logEntries, errorTestQuery, queryError.Error(),
logpipe.LoggingCollectorRecordName), nil
}, timeout).Should(BeTrue())
@@ -146,12 +157,15 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
// Expect the query not to be logged on replicas
for _, pod := range podList.Items {
// Gather pod logs in the form of a JSON array
- logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env)
+ logEntries, err := logs.ParseJSONLogs(
+ env.Ctx, env.Interface,
+ namespace, pod.GetName(),
+ )
Expect(err).NotTo(HaveOccurred())
Expect(logEntries).ToNot(BeEmpty())
// No record should be returned in this case
- isQueryRecordContained := testsUtils.AssertQueryRecord(
+ isQueryRecordContained := logs.AssertQueryRecord(
logEntries,
queryError.Error(),
errorTestQuery,
@@ -164,18 +178,18 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
By("verifying pg_rewind logs after deleting the old primary pod", func() {
// Force-delete the primary
- currentPrimary, _ := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimary, _ := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
quickDelete := &client.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- deletePodError := env.DeletePod(namespace, currentPrimary.GetName(), quickDelete)
+ deletePodError := pods.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete)
Expect(deletePodError).ToNot(HaveOccurred())
// Expect a new primary to be elected
timeout := 180
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
GinkgoWriter.Printf("Error reported while getting current primary %s\n", err.Error())
return "", err
@@ -189,14 +203,17 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() {
Eventually(func() (bool, error) {
// Gather pod logs in the form of a JSON slice
- logEntries, err := testsUtils.ParseJSONLogs(namespace, currentPrimary.GetName(), env)
+ logEntries, err := logs.ParseJSONLogs(
+ env.Ctx, env.Interface,
+ namespace, currentPrimary.GetName(),
+ )
if err != nil {
GinkgoWriter.Printf("Error reported while getting the 'pg_rewind' logger in old primary %s, %s\n",
currentPrimary, err.Error())
return false, err
}
// Expect pg_rewind logger to eventually be present on the old primary logs
- return testsUtils.HasLogger(logEntries, "pg_rewind"), nil
+ return logs.HasLogger(logEntries, "pg_rewind"), nil
}, timeout).Should(BeTrue())
})
})
@@ -221,10 +238,10 @@ var _ = Describe("JSON log output unit tests", Label(tests.LabelObservability),
Expect(err).ToNot(HaveOccurred())
It("Can check valid logging_collector record for query", func() {
Expect(parsedRecord).NotTo(BeNil())
- Expect(testsUtils.CheckRecordForQuery(parsedRecord, errorTestQuery, user, database, message)).To(BeTrue())
+ Expect(logs.CheckRecordForQuery(parsedRecord, errorTestQuery, user, database, message)).To(BeTrue())
})
It("Can check valid logging_collector ", func() {
Expect(parsedRecord).NotTo(BeNil())
- Expect(testsUtils.IsWellFormedLogForLogger(parsedRecord, "postgres")).To(BeTrue())
+ Expect(logs.IsWellFormedLogForLogger(parsedRecord, "postgres")).To(BeTrue())
})
})
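The JSON log helpers now take the context and the kubernetes.Interface directly instead of the whole environment. A sketch of the parse-then-assert pattern these observability tests repeat (podHasLogger is a hypothetical name):

    package e2e

    import (
        "context"

        "k8s.io/client-go/kubernetes"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs"
    )

    // podHasLogger parses a pod's JSON logs and reports whether any entry was
    // produced by the given logger, e.g. "postgres" or "pg_rewind".
    func podHasLogger(
        ctx context.Context,
        kubeInterface kubernetes.Interface,
        namespace, podName, logger string,
    ) (bool, error) {
        entries, err := logs.ParseJSONLogs(ctx, kubeInterface, namespace, podName)
        if err != nil {
            return false, err
        }
        return logs.HasLogger(entries, logger), nil
    }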
diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go
index 64fa7bd753..9f431f330d 100644
--- a/tests/e2e/managed_roles_test.go
+++ b/tests/e2e/managed_roles_test.go
@@ -30,7 +30,12 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -69,10 +74,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
BeforeAll(func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
By("setting up cluster with managed roles", func() {
@@ -91,12 +96,13 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
FROM pg_auth_members GROUP BY member
) mem ON member = oid
WHERE rolname =` + pq.QuoteLiteral(roleName)
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
if err != nil {
return []string{ERROR}
@@ -108,15 +114,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
}
assertRoleStatus := func(namespace, clusterName, query, expectedResult string) {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() string {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
if err != nil {
return ""
@@ -136,31 +143,34 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
rolConnLimitInSpec := 4
By("ensuring the roles created in the managed stanza are in the database with correct attributes", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(username), "t"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(userWithPerpetualPass), "t"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(userWithHashedPassword), "t"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(unrealizableUser), "f"), 30).Should(Succeed())
query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+
"and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+
- "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec, rolCreateDBInSpec,
+ "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec,
+ rolCreateDBInSpec,
rolCreateRoleInSpec, rolInheritInSpec, rolReplicationInSpec, rolByPassRLSInSpec, rolConnLimitInSpec)
- query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_roles WHERE rolname='%s'", userWithPerpetualPass)
+ query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_roles WHERE rolname='%s'",
+ userWithPerpetualPass)
for _, q := range []string{query, query2} {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
q)
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(Equal("t\n"))
@@ -168,19 +178,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verifying connectivity of new managed role", func() {
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
// assert connectivity using the username and password defined in the secrets
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName,
- username, password, env)
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName,
- userWithHashedPassword, userWithHashedPassword, env)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, username, password, env)
})
By("ensuring the app role has been granted createdb in the managed stanza", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName,
roleExistsQuery(appUsername), "t"), 30).Should(Succeed())
query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+
@@ -189,25 +196,25 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("verifying connectivity of app user", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
appUserSecret := corev1.Secret{}
- err = testsUtils.GetObject(
- env,
+ err = objects.Get(
+ env.Ctx, env.Client,
types.NamespacedName{Name: cluster.GetApplicationSecretName(), Namespace: namespace},
&appUserSecret,
)
Expect(err).NotTo(HaveOccurred())
pass := string(appUserSecret.Data["password"])
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName, appUsername, pass, env)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, appUsername, pass, env)
})
By("Verify show unrealizable role configurations in the status", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() int {
@@ -227,10 +234,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
expectedCreateDB := false
expectedCreateRole := true
expectedConnLmt := int64(10)
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
By("updating role attribute in spec", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -250,8 +257,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("the connection should fail since we disabled the login", func() {
- forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, rwService,
- testsUtils.PostgresDBName, username, password)
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, rwService, postgres.PostgresDBName, username, password,
+ )
defer func() {
_ = conn.Close()
forwardConn.Close()
@@ -264,7 +273,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("enable Login again", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
updated.Spec.Managed.Roles[0].Login = true
@@ -281,9 +290,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("the connectivity should be success again", func() {
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, password, env)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, username, password, env)
})
})
@@ -299,7 +308,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
defaultRolConnLimit = int64(-1)
)
By("Add role new_role with all attribute omit", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -325,7 +334,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
It("Can update role comment and verify changes in db ", func() {
By("Update comment for role new_role", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -357,11 +366,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
It("Can update role membership and verify changes in db ", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("Remove invalid parent role from unrealizableUser and verify user in database", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -373,16 +382,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
Eventually(func() int {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed())
})
By("Add role in InRole for role new_role and verify in database", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -397,7 +406,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
Eventually(func() int {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
@@ -405,7 +414,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Remove parent role from InRole for role new_role and verify in database", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -419,7 +428,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
Eventually(func() int {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(0))
@@ -427,7 +436,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Mock the error for unrealizable User and verify user in database", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -439,20 +448,20 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
// user not changed
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed())
Eventually(func() int {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile)
}, 30).Should(Equal(1))
Eventually(func() int {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return len(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser])
}, 30).Should(Equal(1))
Eventually(func() string {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
return cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser][0]
}, 30).Should(ContainSubstring(fmt.Sprintf("role \"%s\" is a member of role \"%s\"",
@@ -464,7 +473,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
var err error
newPassword := "ThisIsNew"
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("update password from secrets", func() {
@@ -473,35 +482,36 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verify connectivity using changed password in secret", func() {
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
// assert connectable use username and password defined in secrets
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, username, newPassword, env)
})
By("Update password in database", func() {
query := fmt.Sprintf("ALTER ROLE %s WITH PASSWORD %s",
username, pq.QuoteLiteral(newPassword))
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
Expect(err).ToNot(HaveOccurred())
})
By("Verify password in secrets is still valid", func() {
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env)
+ rwService := services.GetReadWriteServiceName(clusterName)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, username, newPassword, env)
})
})
It("Can update role password validUntil and verify in the database", func() {
newValidUntilString := "2023-04-04T00:00:00.000000Z"
By("Update comment for role new_role", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
for i, r := range updated.Spec.Managed.Roles {
@@ -537,7 +547,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
It("Can drop role with ensure absent option", func() {
By("Delete role new_role with EnsureOption ", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -551,9 +561,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
})
By("Verify new_role not existed in db", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName,
roleExistsQuery(newUserName), "f"), 30).Should(Succeed())
})
})
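Note on the hunks above: the managed-roles changes follow the mechanical pattern that runs through this whole patch. Helper methods attached to the shared `env` test object become package-level functions in focused packages (`clusterutils`, `exec`, `objects`, `postgres`, `services`), each taking the context and Kubernetes client explicitly. A minimal sketch of the resulting call shape, assuming `clusterutils.Get` returns `*apiv1.Cluster` and `clusterutils.GetPrimary` returns `*corev1.Pod` (consistent with how their results are used above, though the patch does not show the signatures):

    package sketch

    import (
        "context"

        apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
        corev1 "k8s.io/api/core/v1"
        k8client "sigs.k8s.io/controller-runtime/pkg/client"

        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
    )

    // fetchClusterState shows the post-refactor convention: the context and
    // client are passed explicitly instead of being carried by a shared env.
    func fetchClusterState(
        ctx context.Context,
        crudClient k8client.Client,
        namespace, clusterName string,
    ) (*apiv1.Cluster, *corev1.Pod, error) {
        cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
        if err != nil {
            return nil, nil, err
        }
        primary, err := clusterutils.GetPrimary(ctx, crudClient, namespace, clusterName)
        if err != nil {
            return nil, nil, err
        }
        return cluster, primary, nil
    }

The trade-off is visible throughout the diff: each test's dependencies now appear at the call site, at the cost of longer argument lists.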
diff --git a/tests/e2e/managed_services_test.go b/tests/e2e/managed_services_test.go
index ee139aa744..348d051472 100644
--- a/tests/e2e/managed_services_test.go
+++ b/tests/e2e/managed_services_test.go
@@ -27,7 +27,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -53,14 +55,14 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
It("should create and delete a rw managed service", func(ctx SpecContext) {
const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-rw.yaml.template"
const serviceName = "test-rw"
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("ensuring the service is created", func() {
@@ -78,32 +80,32 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
By("ensuring the service is deleted when removed from the additional field", func() {
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{}
return env.Client.Update(ctx, cluster)
}, RetryTimeout, PollingTime).Should(Succeed())
- AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ManagedServices], env)
Eventually(func(g Gomega) {
var serviceRW corev1.Service
err = env.Client.Get(ctx, types.NamespacedName{Name: serviceName, Namespace: namespace}, &serviceRW)
g.Expect(apierrs.IsNotFound(err)).To(BeTrue())
- }, testTimeouts[utils.ManagedServices]).Should(Succeed())
+ }, testTimeouts[timeouts.ManagedServices]).Should(Succeed())
})
})
It("should properly handle disabledDefaultServices field", func(ctx SpecContext) {
const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-no-default.yaml.template"
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
ro := specs.CreateClusterReadOnlyService(*cluster)
@@ -124,45 +126,45 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
By("creating them when they are re-enabled", func() {
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{}
return env.Client.Update(ctx, cluster)
}, RetryTimeout, PollingTime).Should(Succeed())
- AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ManagedServices], env)
Eventually(func(g Gomega) {
var service corev1.Service
err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rw.Name}, &service)
g.Expect(err).ToNot(HaveOccurred())
- }, testTimeouts[utils.ManagedServices]).Should(Succeed())
+ }, testTimeouts[timeouts.ManagedServices]).Should(Succeed())
Eventually(func(g Gomega) {
var service corev1.Service
err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: ro.Name}, &service)
g.Expect(err).ToNot(HaveOccurred())
- }, testTimeouts[utils.ManagedServices]).Should(Succeed())
+ }, testTimeouts[timeouts.ManagedServices]).Should(Succeed())
Eventually(func(g Gomega) {
var service corev1.Service
err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: r.Name}, &service)
g.Expect(err).ToNot(HaveOccurred())
- }, testTimeouts[utils.ManagedServices]).Should(Succeed())
+ }, testTimeouts[timeouts.ManagedServices]).Should(Succeed())
})
})
It("should properly handle replace update strategy", func(ctx SpecContext) {
const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-replace-strategy.yaml.template"
const serviceName = "test-rw"
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
var creationTimestamp metav1.Time
@@ -185,7 +187,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
By("updating the service definition", func() {
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.ObjectMeta.Labels["new-label"] = "new"
return env.Client.Update(ctx, cluster)
@@ -200,7 +202,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa
g.Expect(service.Labels["new-label"]).To(Equal("new"))
g.Expect(service.UID).ToNot(Equal(uid))
g.Expect(service.CreationTimestamp).ToNot(Equal(creationTimestamp))
- }, testTimeouts[utils.ManagedServices]).Should(Succeed())
+ }, testTimeouts[timeouts.ManagedServices]).Should(Succeed())
})
})
})
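The `Eventually(func(g Gomega) error { ... return env.Client.Update(ctx, cluster) })` blocks above re-read the Cluster inside the polling closure deliberately: an Update against a stale resourceVersion fails with a conflict, and re-fetching gives each retry a fresh copy. A sketch of the same read-modify-write using client-go's conflict-retry helper instead of Gomega polling (assuming, as above, that `clusterutils.Get` returns `*apiv1.Cluster`):

    package sketch

    import (
        "context"

        "k8s.io/client-go/util/retry"
        k8client "sigs.k8s.io/controller-runtime/pkg/client"

        apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
        "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
    )

    // clearAdditionalServices retries the read-modify-write on update
    // conflicts, which is what the Eventually blocks above achieve by polling.
    func clearAdditionalServices(
        ctx context.Context,
        crudClient k8client.Client,
        namespace, clusterName string,
    ) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
            if err != nil {
                return err
            }
            cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{}
            return crudClient.Update(ctx, cluster)
        })
    }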
diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go
index e057220f35..430c89fa17 100644
--- a/tests/e2e/metrics_test.go
+++ b/tests/e2e/metrics_test.go
@@ -28,7 +28,10 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -92,29 +95,30 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
AssertGatherMetrics := func(namespacePrefix, clusterFile string) {
// Create the cluster namespace
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 2, 1)
- metricsClusterName, err := env.GetResourceNameFromYAML(clusterFile)
+ metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterFile)
Expect(err).ToNot(HaveOccurred())
// Create the cluster
AssertCreateCluster(namespace, metricsClusterName, clusterFile, env)
- cluster, err := env.GetCluster(namespace, metricsClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).NotTo(HaveOccurred())
// Check metrics on each pod
By("ensuring metrics are correct on each pod", func() {
- podList, err := env.GetClusterPodList(namespace, metricsClusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).ToNot(HaveOccurred())
// Gather metrics in each pod
for _, pod := range podList.Items {
By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() {
- out, err := utils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled())
+ out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod,
+ cluster.IsMetricsTLSEnabled())
Expect(err).ToNot(HaveOccurred(), "while getting pod metrics")
expectedMetrics := buildExpectedMetrics(cluster, !specs.IsPodPrimary(pod))
assertIncludesMetrics(out, expectedMetrics)
@@ -140,10 +144,10 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
It("can gather metrics with multiple target databases", func() {
const namespacePrefix = "metrics-target-databases-e2e"
- metricsClusterName, err := env.GetResourceNameFromYAML(clusterMetricsDBFile)
+ metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterMetricsDBFile)
Expect(err).ToNot(HaveOccurred())
// Create the cluster namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCustomMetricsResourcesExist(namespace, customQueriesTargetDBSampleFile, 1, 1)
@@ -153,7 +157,7 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBTwo, testTableName)
AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBSecret, testTableName)
- cluster, err := env.GetCluster(namespace, metricsClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).ToNot(HaveOccurred())
AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster)
@@ -162,10 +166,10 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
It("can gather default metrics details", func() {
const clusterWithDefaultMetricsFile = fixturesDir + "/base/cluster-storage-class.yaml.template"
const namespacePrefix = "default-metrics-details"
- metricsClusterName, err := env.GetResourceNameFromYAML(clusterWithDefaultMetricsFile)
+ metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithDefaultMetricsFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, metricsClusterName, clusterWithDefaultMetricsFile, env)
@@ -181,30 +185,32 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
return err
}, 10).ShouldNot(HaveOccurred())
})
- cluster, err := env.GetCluster(namespace, metricsClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).ToNot(HaveOccurred())
- collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), true)
+ collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(),
+ true)
})
It("can gather metrics depending on the predicate query", func() {
// Create the cluster namespace
const namespacePrefix = "predicate-query-metrics-e2e"
- metricsClusterName, err := env.GetResourceNameFromYAML(clusterMetricsPredicateQueryFile)
+ metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterMetricsPredicateQueryFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1, 0)
+ AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1,
+ 0)
// Create the cluster
AssertCreateCluster(namespace, metricsClusterName, clusterMetricsPredicateQueryFile, env)
By("ensuring only metrics with a positive predicate are collected", func() {
- podList, err := env.GetClusterPodList(namespace, metricsClusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).ToNot(HaveOccurred())
- cluster, err := env.GetCluster(namespace, metricsClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).ToNot(HaveOccurred())
// We expect only the metrics that have a predicate_query valid.
@@ -223,7 +229,8 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
// Gather metrics in each pod
for _, pod := range podList.Items {
By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() {
- out, err := utils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled())
+ out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod,
+ cluster.IsMetricsTLSEnabled())
Expect(err).ToNot(HaveOccurred(), "while getting pod metrics")
assertIncludesMetrics(out, expectedMetrics)
assertExcludesMetrics(out, nonCollectableMetrics)
@@ -237,18 +244,19 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
const defaultMonitoringQueriesDisableSampleFile = fixturesDir +
"/metrics/cluster-disable-default-metrics.yaml.template"
const namespacePrefix = "disable-default-metrics"
- metricsClusterName, err := env.GetResourceNameFromYAML(defaultMonitoringQueriesDisableSampleFile)
+ metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, defaultMonitoringQueriesDisableSampleFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create the cluster
AssertCreateCluster(namespace, metricsClusterName, defaultMonitoringQueriesDisableSampleFile, env)
- cluster, err := env.GetCluster(namespace, metricsClusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName)
Expect(err).ToNot(HaveOccurred())
- collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), false)
+ collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(),
+ false)
})
It("execute custom queries against the application database on replica clusters", func() {
@@ -263,15 +271,15 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
)
// Fetching the source cluster name
- srcClusterName, err := env.GetResourceNameFromYAML(srcClusterSampleFile)
+ srcClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, srcClusterSampleFile)
Expect(err).ToNot(HaveOccurred())
// Fetching replica cluster name
- replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleFile)
+ replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleFile)
Expect(err).ToNot(HaveOccurred())
// create namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Creating and verifying custom queries configmap
@@ -290,8 +298,11 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
)
By(fmt.Sprintf("grant select permission for %v table to pg_monitor", testTableName), func() {
- forward, conn, err := utils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
srcClusterName,
srcClusterDatabaseName,
@@ -307,17 +318,18 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
_, err = conn.Exec(cmd)
Expect(err).ToNot(HaveOccurred())
})
- replicaCluster, err := env.GetCluster(namespace, replicaClusterName)
+ replicaCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, replicaClusterName)
Expect(err).ToNot(HaveOccurred())
By("collecting metrics on each pod and checking that the table has been found", func() {
- podList, err := env.GetClusterPodList(namespace, replicaClusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, replicaClusterName)
Expect(err).ToNot(HaveOccurred())
// Gather metrics in each pod
expectedMetric := fmt.Sprintf("cnpg_%v_row_count 3", testTableName)
for _, pod := range podList.Items {
- out, err := utils.RetrieveMetricsFromInstance(env, pod, replicaCluster.IsMetricsTLSEnabled())
+ out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod,
+ replicaCluster.IsMetricsTLSEnabled())
Expect(err).Should(Not(HaveOccurred()))
Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric))
}
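All the metrics assertions above reduce to the same two steps: scrape the raw Prometheus exposition text from an instance (via `proxy.RetrieveMetricsFromInstance`, whose new signature takes the context and clientset explicitly) and look for an exact sample line. A self-contained sketch of that final comparison, mirroring the `cnpg_..._row_count 3` check in the replica-cluster hunk (the helper name is illustrative):

    package sketch

    import (
        "fmt"
        "strings"
    )

    // containsMetricSample reports whether the exposition text contains the
    // exact sample line for a table row-count metric, for example
    // "cnpg_test_table_row_count 3".
    func containsMetricSample(out, tableName string, rows int) bool {
        expected := fmt.Sprintf("cnpg_%s_row_count %d", tableName, rows)
        for _, line := range strings.Split(out, "\n") {
            if line == expected {
                return true
            }
        }
        return false
    }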
diff --git a/tests/e2e/monitoring_test.go b/tests/e2e/monitoring_test.go
index 6fabfaf38d..c1ef082a10 100644
--- a/tests/e2e/monitoring_test.go
+++ b/tests/e2e/monitoring_test.go
@@ -17,8 +17,15 @@ limitations under the License.
package e2e
import (
+ "context"
+
+ monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+ "k8s.io/apimachinery/pkg/types"
+ k8client "sigs.k8s.io/controller-runtime/pkg/client"
+
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -26,6 +33,24 @@ import (
// Set of tests that set up a cluster with monitoring support enabled
var _ = Describe("PodMonitor support", Serial, Label(tests.LabelObservability), func() {
+ getPodMonitorFunc := func(
+ ctx context.Context,
+ crudClient k8client.Client,
+ namespace, name string,
+ ) (*monitoringv1.PodMonitor, error) {
+ podMonitor := &monitoringv1.PodMonitor{}
+ namespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }
+
+ err := objects.Get(ctx, crudClient, namespacedName, podMonitor)
+ if err != nil {
+ return nil, err
+ }
+ return podMonitor, nil
+ }
+
const (
namespacePrefix = "cluster-monitoring-e2e"
level = tests.Medium
@@ -53,13 +78,13 @@ var _ = Describe("PodMonitor support", Serial, Label(tests.LabelObservability),
})
It("sets up a cluster enabling PodMonitor feature", func() {
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterDefaultName, clusterDefaultMonitoringFile, env)
By("verifying PodMonitor existence", func() {
- podMonitor, err := env.GetPodMonitor(namespace, clusterDefaultName)
+ podMonitor, err := getPodMonitorFunc(env.Ctx, env.Client, namespace, clusterDefaultName)
Expect(err).ToNot(HaveOccurred())
endpoints := podMonitor.Spec.PodMetricsEndpoints
diff --git a/tests/e2e/nodeselector_test.go b/tests/e2e/nodeselector_test.go
index 761b3ecfe3..a93945838c 100644
--- a/tests/e2e/nodeselector_test.go
+++ b/tests/e2e/nodeselector_test.go
@@ -24,7 +24,9 @@ import (
"k8s.io/apimachinery/pkg/types"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -48,7 +50,7 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() {
// We create a namespace and verify it exists
By(fmt.Sprintf("having a %v namespace", namespace), func() {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Creating a namespace should be quick
@@ -76,7 +78,7 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() {
timeout := 120
Eventually(func() bool {
isPending := false
- podList, err := env.GetPodList(namespace)
+ podList, err := pods.List(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
if len(podList.Items) > 0 {
if len(podList.Items[0].Status.Conditions) > 0 {
@@ -107,13 +109,13 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() {
var nodeName string
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// We label one node with the label we have defined in the cluster
// YAML definition
By("labelling a node", func() {
- nodeList, err := env.GetNodeList()
+ nodeList, err := nodes.List(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
// We want to label a node that is uncordoned and untainted,
@@ -126,14 +128,14 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() {
}
}
cmd := fmt.Sprintf("kubectl label node %v nodeselectortest=exists --overwrite", nodeName)
- _, _, err = utils.Run(cmd)
+ _, _, err = run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
})
// All the pods should be running on the labeled node
By("confirm pods run on the labelled node", func() {
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- podList, err := env.GetPodList(namespace)
+ podList, err := pods.List(env.Ctx, env.Client, namespace)
Expect(err).ToNot(HaveOccurred())
for _, podDetails := range podList.Items {
if podDetails.Status.Phase == "Running" {
diff --git a/tests/e2e/openshift_upgrade_test.go b/tests/e2e/openshift_upgrade_test.go
index 3305128b8f..0ba60354ab 100644
--- a/tests/e2e/openshift_upgrade_test.go
+++ b/tests/e2e/openshift_upgrade_test.go
@@ -16,10 +16,17 @@ limitations under the License.
package e2e
import (
+ "fmt"
+ "regexp"
+ "strings"
+
"github.com/blang/semver"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/openshift"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -53,21 +60,21 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
ocp412, err = semver.Make("4.12.0")
Expect(err).ToNot(HaveOccurred())
// Get current OpenShift Versions
- ocpVersion, err = testsUtils.GetOpenshiftVersion(env)
+ ocpVersion, err = openshift.GetOpenshiftVersion(env.Ctx, env.RestClientConfig)
Expect(err).ToNot(HaveOccurred())
})
cleanupOperator := func() error {
// Cleanup the Operator
- err = testsUtils.DeleteOperatorCRDs(env)
+ err = openshift.DeleteOperatorCRDs(env.Ctx, env.Client)
if err != nil {
return err
}
- err = testsUtils.DeleteSubscription(env)
+ err = openshift.DeleteSubscription(env.Ctx, env.Client)
if err != nil {
return err
}
- err = testsUtils.DeleteCSV(env)
+ err = openshift.DeleteCSV(env.Ctx, env.Client)
if err != nil {
return err
}
@@ -78,7 +85,7 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
err := cleanupOperator()
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
- _, err = env.GetOperatorPod()
+ _, err = operator.GetPod(env.Ctx, env.Client)
return err
}, 120).Should(HaveOccurred())
}
@@ -86,21 +93,21 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
assertClusterIsAligned := func(namespace, clusterName string) {
By("Verifying the cluster pods have been upgraded", func() {
Eventually(func() bool {
- return testsUtils.HasOperatorBeenUpgraded(env)
+ return operator.HasBeenUpgraded(env.Ctx, env.Client)
}).Should(BeTrue())
- operatorPodName, err := testsUtils.GetOperatorPodName(env)
+ operatorPodName, err := operator.GetPodName(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
- expectedVersion, err := testsUtils.GetOperatorVersion("openshift-operators", operatorPodName)
+ expectedVersion, err := operator.Version("openshift-operators", operatorPodName)
Expect(err).ToNot(HaveOccurred())
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
Eventually(func() (string, error) {
- return testsUtils.GetManagerVersion(namespace, pod.Name)
+ return GetManagerVersion(namespace, pod.Name)
}, 300).Should(BeEquivalentTo(expectedVersion))
}
})
@@ -110,13 +117,13 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
// Apply a subscription in the openshift-operators namespace.
// This should create the operator
By("Applying the initial subscription", func() {
- err := testsUtils.CreateSubscription(env, initialSubscription)
+ err := openshift.CreateSubscription(env.Ctx, env.Client, initialSubscription)
Expect(err).ToNot(HaveOccurred())
- AssertOperatorIsReady()
+ AssertOperatorIsReady(env.Ctx, env.Client, env.Interface)
})
// Gather the version and semantic Versions of the operator
- currentVersion, err := testsUtils.GetSubscriptionVersion(env)
+ currentVersion, err := openshift.GetSubscriptionVersion(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
currentSemVersion, err := semver.Make(currentVersion)
Expect(err).ToNot(HaveOccurred())
@@ -124,27 +131,27 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
Expect(err).ToNot(HaveOccurred())
// Create a Cluster in a namespace we'll delete at the end
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
By("Patching the status condition if required", func() {
// Patch the status conditions if we are running on a pre new-policy release
if currentSemVersion.LT(newPolicyRelease) {
- err = testsUtils.PatchStatusCondition(namespace, clusterName, env)
+ err = openshift.PatchStatusCondition(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
}
})
By("Applying the upgrade subscription", func() {
// Apply the new subscription to upgrade to a new version of the operator
- err = testsUtils.UpgradeSubscription(env, upgradeSubscription)
+ err = openshift.UpgradeSubscription(env.Ctx, env.Client, upgradeSubscription)
Expect(err).ToNot(HaveOccurred())
Eventually(func() (string, error) {
- return testsUtils.GetSubscriptionVersion(env)
+ return openshift.GetSubscriptionVersion(env.Ctx, env.Client)
}, 300).
ShouldNot(BeEquivalentTo(currentVersion))
- AssertOperatorIsReady()
+ AssertOperatorIsReady(env.Ctx, env.Client, env.Interface)
})
// Check if the upgrade was successful by making sure all the pods
@@ -160,3 +167,18 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
applyUpgrade("stable-v1", "alpha")
})
})
+
+// GetManagerVersion returns the current manager version of a given pod
+func GetManagerVersion(namespace, podName string) (string, error) {
+ out, _, err := run.Unchecked(fmt.Sprintf(
+ "kubectl -n %v exec %v -c postgres -- /controller/manager version",
+ namespace,
+ podName,
+ ))
+ if err != nil {
+ return "", err
+ }
+ versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`)
+ ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1]
+ return ver, nil
+}
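One robustness note on the `GetManagerVersion` helper added above: `FindStringSubmatch` returns nil when the output does not match, so the unguarded `[1]` index would panic on unexpected `manager version` output. A defensive variant (a sketch, not part of the patch) surfaces that as an error instead:

    package sketch

    import (
        "fmt"
        "regexp"
        "strings"
    )

    var versionRegexp = regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`)

    // parseManagerVersion extracts the version from the manager's output,
    // returning an error instead of panicking when the format is unexpected.
    func parseManagerVersion(out string) (string, error) {
        matches := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))
        if len(matches) < 2 {
            return "", fmt.Errorf("unexpected manager version output: %q", out)
        }
        return matches[1], nil
    }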
diff --git a/tests/e2e/operator_deployment_test.go b/tests/e2e/operator_deployment_test.go
index 9b121ccfbc..d451723b8c 100644
--- a/tests/e2e/operator_deployment_test.go
+++ b/tests/e2e/operator_deployment_test.go
@@ -18,6 +18,7 @@ package e2e
import (
"github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -34,10 +35,10 @@ var _ = Describe("PostgreSQL operator deployment", Label(tests.LabelBasic, tests
It("sets up the operator", func() {
By("having a pod for the operator in state ready", func() {
- AssertOperatorIsReady()
+ AssertOperatorIsReady(env.Ctx, env.Client, env.Interface)
})
By("having a deployment for the operator in state ready", func() {
- ready, err := env.IsOperatorDeploymentReady()
+ ready, err := operator.IsDeploymentReady(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
Expect(ready).To(BeTrue())
})
diff --git a/tests/e2e/operator_ha_test.go b/tests/e2e/operator_ha_test.go
index 878f93df9d..94a65fe81c 100644
--- a/tests/e2e/operator_ha_test.go
+++ b/tests/e2e/operator_ha_test.go
@@ -21,7 +21,9 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -49,11 +51,11 @@ var _ = Describe("Operator High Availability", Serial,
It("can work as HA mode", func() {
// Make sure there's at least one pod of the operator
- err := env.ScaleOperatorDeployment(1)
+ err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1)
Expect(err).ToNot(HaveOccurred())
// Get Operator Pod name
- operatorPodName, err := env.GetOperatorPod()
+ operatorPodName, err := operator.GetPod(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
By("having an operator already running", func() {
@@ -64,11 +66,11 @@ var _ = Describe("Operator High Availability", Serial,
})
// Get operator namespace
- operatorNamespace, err := env.GetOperatorNamespaceName()
+ operatorNamespace, err := operator.NamespaceName(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
// Create the cluster namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create Cluster
@@ -76,18 +78,20 @@ var _ = Describe("Operator High Availability", Serial,
By("verifying current leader", func() {
// Check for the current Operator Pod leader from ConfigMap
- Expect(testsUtils.GetLeaderInfoFromLease(operatorNamespace, env)).To(HavePrefix(operatorPodName.GetName()))
+ Expect(operator.GetLeaderInfoFromLease(
+ env.Ctx, env.Interface,
+ operatorNamespace)).To(HavePrefix(operatorPodName.GetName()))
})
By("scale up operator replicas to 3", func() {
// Set old leader pod name to operator pod name
oldLeaderPodName = operatorPodName.GetName()
- err := env.ScaleOperatorDeployment(3)
+ err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 3)
Expect(err).ToNot(HaveOccurred())
// Gather pod names from operator deployment
- podList, err := env.GetPodList(operatorNamespace)
+ podList, err := podutils.List(env.Ctx, env.Client, operatorNamespace)
Expect(err).ToNot(HaveOccurred())
for _, podItem := range podList.Items {
operatorPodNames = append(operatorPodNames, podItem.GetName())
@@ -97,7 +101,9 @@ var _ = Describe("Operator High Availability", Serial,
By("verifying leader information after scale up", func() {
// Check for Operator Pod leader from ConfigMap to be the former one
Eventually(func() (string, error) {
- return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env)
+ return operator.GetLeaderInfoFromLease(
+ env.Ctx, env.Interface,
+ operatorNamespace)
}, 60).Should(HavePrefix(oldLeaderPodName))
})
@@ -106,12 +112,12 @@ var _ = Describe("Operator High Availability", Serial,
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- err = env.DeletePod(operatorNamespace, oldLeaderPodName, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, operatorNamespace, oldLeaderPodName, quickDelete)
Expect(err).ToNot(HaveOccurred())
// Verify operator pod should have been deleted
Eventually(func() []string {
- podList, err := env.GetPodList(operatorNamespace)
+ podList, err := podutils.List(env.Ctx, env.Client, operatorNamespace)
Expect(err).ToNot(HaveOccurred())
var podNames []string
for _, podItem := range podList.Items {
@@ -124,13 +130,15 @@ var _ = Describe("Operator High Availability", Serial,
By("new leader should be configured", func() {
// Verify that the leader name is different from the previous one
Eventually(func() (string, error) {
- return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env)
+ return operator.GetLeaderInfoFromLease(
+ env.Ctx, env.Interface,
+ operatorNamespace)
}, 120).ShouldNot(HavePrefix(oldLeaderPodName))
})
By("verifying reconciliation", func() {
// Get current CNPG cluster's Primary
- currentPrimary, err := env.GetClusterPrimary(namespace, clusterName)
+ currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
oldPrimary := currentPrimary.GetName()
@@ -138,7 +146,7 @@ var _ = Describe("Operator High Availability", Serial,
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete)
Expect(err).ToNot(HaveOccurred())
// Expect a new primary to be elected and promoted
@@ -147,18 +155,20 @@ var _ = Describe("Operator High Availability", Serial,
By("scale down operator replicas to 1", func() {
// Scale down operator deployment to one replica
- err := env.ScaleOperatorDeployment(1)
+ err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1)
Expect(err).ToNot(HaveOccurred())
})
By("verifying leader information after scale down", func() {
// Get Operator Pod name
- operatorPodName, err := env.GetOperatorPod()
+ operatorPodName, err := operator.GetPod(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
// Verify the Operator Pod is the leader
Eventually(func() (string, error) {
- return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env)
+ return operator.GetLeaderInfoFromLease(
+ env.Ctx, env.Interface,
+ operatorNamespace)
}, 120).Should(HavePrefix(operatorPodName.GetName()))
})
})
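The leader checks above compare the holder identity recorded in the operator's leader-election lease against a pod-name prefix, through the new `operator.GetLeaderInfoFromLease(ctx, kubeInterface, namespace)` helper. A sketch of what reading such a holder looks like with client-go, assuming a standard `coordination.k8s.io/v1` Lease (the lease-name parameter is illustrative; the patch does not show the actual lease name):

    package sketch

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // leaseHolder returns the holderIdentity of a leader-election Lease,
    // which leader election typically sets to "<pod-name>_<uuid>"; that is
    // why the assertions above use HavePrefix with the pod name.
    func leaseHolder(
        ctx context.Context,
        clientset kubernetes.Interface,
        namespace, leaseName string,
    ) (string, error) {
        lease, err := clientset.CoordinationV1().Leases(namespace).Get(ctx, leaseName, metav1.GetOptions{})
        if err != nil {
            return "", err
        }
        if lease.Spec.HolderIdentity == nil {
            return "", fmt.Errorf("lease %s/%s has no holder", namespace, leaseName)
        }
        return *lease.Spec.HolderIdentity, nil
    }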
diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go
index 5f23913135..4a38e29a46 100644
--- a/tests/e2e/operator_unavailable_test.go
+++ b/tests/e2e/operator_unavailable_test.go
@@ -26,14 +26,16 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Set of tests in which we test the concurrent disruption of both the primary
// and the operator pods, asserting that the latter is able to perform a pending
// failover once a new operator pod comes back available.
var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, tests.LabelOperator), func() {
const (
@@ -54,7 +56,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
It("can survive operator failures", func() {
var err error
// Create the cluster namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -63,13 +65,13 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "test",
}
AssertCreateTestData(env, tableLocator)
By("scaling down operator replicas to zero", func() {
- err := env.ScaleOperatorDeployment(0)
+ err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 0)
Expect(err).ToNot(HaveOccurred())
})
@@ -78,7 +80,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- err = env.DeletePod(namespace, currentPrimary, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete)
Expect(err).ToNot(HaveOccurred())
// Expect only 2 instances to be up and running
@@ -108,7 +110,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
By("scaling up the operator replicas to 1", func() {
// Scale up operator deployment to one replica
- err := env.ScaleOperatorDeployment(1)
+ err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1)
Expect(err).ToNot(HaveOccurred())
})
@@ -138,7 +140,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
var operatorPodName string
var err error
// Create the cluster namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -147,12 +149,12 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: "test",
}
AssertCreateTestData(env, tableLocator)
- operatorNamespace, err := env.GetOperatorNamespaceName()
+ operatorNamespace, err := operator.NamespaceName(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
By("deleting primary and operator pod", func() {
@@ -171,11 +173,11 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
wg.Add(1)
wg.Add(1)
go func() {
- _ = env.DeletePod(operatorNamespace, operatorPodName, quickDelete)
+ _ = podutils.Delete(env.Ctx, env.Client, operatorNamespace, operatorPodName, quickDelete)
wg.Done()
}()
go func() {
- _ = env.DeletePod(namespace, currentPrimary, quickDelete)
+ _ = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete)
wg.Done()
}()
wg.Wait()
@@ -202,7 +204,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te
g.Expect(podList.Items[0].Name).NotTo(BeEquivalentTo(operatorPodName))
}, timeout).Should(Succeed())
Eventually(func() (bool, error) {
- return env.IsOperatorDeploymentReady()
+ return operator.IsDeploymentReady(env.Ctx, env.Client)
}, timeout).Should(BeTrue())
})
diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go
index de4fdcd812..ea0b11806f 100644
--- a/tests/e2e/pg_basebackup_test.go
+++ b/tests/e2e/pg_basebackup_test.go
@@ -19,7 +19,9 @@ package e2e
import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -46,16 +48,16 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
Context("can bootstrap via pg_basebackup", Ordered, func() {
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create the source Cluster
- srcClusterName, err = env.GetResourceNameFromYAML(srcCluster)
+ srcClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, srcCluster)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, srcClusterName, srcCluster, env)
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: srcClusterName,
- DatabaseName: utils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -63,17 +65,17 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
It("using basic authentication", func() {
// Create the destination Cluster
- dstClusterName, err := env.GetResourceNameFromYAML(dstClusterBasic)
+ dstClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, dstClusterBasic)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, dstClusterName, dstClusterBasic, env)
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, dstClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
secretName := dstClusterName + apiv1.ApplicationUserSecretSuffix
By("checking the dst cluster with auto generated app password connectable", func() {
AssertApplicationDatabaseConnection(namespace, dstClusterName,
- appUser, utils.AppDBName, "", secretName)
+ appUser, postgres.AppDBName, "", secretName)
})
By("update user application password for dst cluster and verify connectivity", func() {
@@ -83,7 +85,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
namespace,
dstClusterName,
appUser,
- utils.AppDBName,
+ postgres.AppDBName,
newPassword,
secretName)
})
@@ -92,18 +94,21 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: dstClusterName,
- DatabaseName: utils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
})
By("writing some new data to the dst cluster", func() {
- forward, conn, err := utils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
dstClusterName,
- utils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -118,7 +123,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: srcClusterName,
- DatabaseName: utils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
@@ -127,28 +132,31 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
It("using TLS authentication", func() {
// Create the destination Cluster
- dstClusterName, err := env.GetResourceNameFromYAML(dstClusterTLS)
+ dstClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, dstClusterTLS)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, dstClusterName, dstClusterTLS, env)
// We give more time than the usual 600s, since the recovery is slower
- AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, dstClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env)
By("checking data have been copied correctly", func() {
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: dstClusterName,
- DatabaseName: utils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
})
By("writing some new data to the dst cluster", func() {
- forward, conn, err := utils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
dstClusterName,
- utils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -163,7 +171,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: srcClusterName,
- DatabaseName: utils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
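
Note on the recurring pattern in this file: the environment-bound helper `env.ForwardPSQLConnection(env, ...)` becomes the package-level `postgres.ForwardPSQLConnection`, which takes the context, controller-runtime client, clientset, and REST config explicitly. A minimal sketch of the new call shape follows; it assumes the refactored env type lives in `tests/utils/environment`, that the forwarder exposes `Close()`, and that `conn` behaves like a `*sql.DB` (none of these are confirmed by this hunk).

package e2e

import (
	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

// queryAppDB opens a port-forwarded connection to the cluster's app
// database, runs a single statement, and tears the forward down.
func queryAppDB(env *environment.TestingEnvironment, namespace, clusterName, query string) error {
	forward, conn, err := postgres.ForwardPSQLConnection(
		env.Ctx,
		env.Client,
		env.Interface,
		env.RestClientConfig,
		namespace,
		clusterName,
		postgres.AppDBName,
		apiv1.ApplicationUserSecretSuffix,
	)
	if err != nil {
		return err
	}
	// Assumption: Close() mirrors the deferred cleanup used in these tests.
	defer forward.Close()
	_, err = conn.Exec(query) // assumption: conn is a *sql.DB-like handle
	return err
}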
diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go
index c0672f4479..44acfafed8 100644
--- a/tests/e2e/pg_data_corruption_test.go
+++ b/tests/e2e/pg_data_corruption_test.go
@@ -27,7 +27,13 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -44,7 +50,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
Skip("Test depth is lower than the amount requested for this test")
}
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
})
@@ -55,19 +61,19 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
var oldPrimaryPodName, oldPrimaryPVCName string
var err error
tableName := "test_pg_data_corruption"
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testsUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
By("gathering current primary pod and pvc", func() {
- oldPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ oldPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
oldPrimaryPodName = oldPrimaryPod.GetName()
// Get the PVC related to the pod
@@ -84,8 +90,9 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
By("corrupting primary pod by removing PGDATA", func() {
cmd := fmt.Sprintf("find %v/base/* -type f -delete", specs.PgDataPath)
- _, _, err = env.ExecCommandInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: oldPrimaryPodName,
}, nil,
@@ -95,7 +102,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
By("verifying failover happened after the primary pod PGDATA got corrupted", func() {
Eventually(func() (string, error) {
- newPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ newPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
@@ -126,7 +133,10 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
By("removing the old primary pod and its pvc", func() {
// Check if walStorage is enabled
- walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env)
+ walStorageEnabled, err := storage.IsWalStorageEnabled(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ )
Expect(err).ToNot(HaveOccurred())
// Force delete setting
@@ -160,7 +170,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func(
}
// Deleting old primary pod
- err = env.DeletePod(namespace, oldPrimaryPodName, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, oldPrimaryPodName, quickDelete)
Expect(err).ToNot(HaveOccurred())
// checking that the old primary pod is eventually gone
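
Likewise, `env.ExecCommandInInstancePod` becomes `exec.CommandInInstancePod` with the dependencies passed first. A hedged sketch of the new shape: the trailing `nil` is assumed to keep the old optional `*time.Duration` timeout semantics, and the `"sh", "-c"` continuation is an assumption, since the hunk above cuts off after the locator.

package e2e

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
)

// wipePGDATA deletes the base files of an instance, as the corruption
// test does. Destructive: for e2e use only.
func wipePGDATA(env *environment.TestingEnvironment, namespace, podName string) error {
	cmd := fmt.Sprintf("find %v/base/* -type f -delete", specs.PgDataPath)
	_, _, err := exec.CommandInInstancePod(
		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
		exec.PodLocator{Namespace: namespace, PodName: podName},
		nil, // timeout: assumed unchanged from the old signature
		"sh", "-c", cmd, // command args: assumption, not shown in the hunk
	)
	return err
}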
diff --git a/tests/e2e/pg_wal_volume_test.go b/tests/e2e/pg_wal_volume_test.go
index 612f936b59..75c4d52a59 100644
--- a/tests/e2e/pg_wal_volume_test.go
+++ b/tests/e2e/pg_wal_volume_test.go
@@ -30,7 +30,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -46,7 +48,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() {
)
var namespace string
verifyPgWal := func(namespace string) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(len(podList.Items), err).To(BeEquivalentTo(3))
By("checking that pg_wal PVC has been created", func() {
for _, pod := range podList.Items {
@@ -77,8 +79,9 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() {
".*[0-9]$")
timeout := 300
Eventually(func() (int, error, error) {
- out, _, err := env.ExecCommandInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: pod.GetName(),
}, nil,
@@ -93,7 +96,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() {
// Inline function to patch walStorage in existing cluster
updateWalStorage := func(namespace, clusterName string) {
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
WalStorageClass := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
cluster.Spec.WalStorage = &apiv1.StorageConfiguration{
@@ -118,7 +121,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() {
const namespacePrefix = "pg-wal-volume-e2e"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithPgWal, env)
verifyPgWal(namespace)
@@ -128,7 +131,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() {
const namespacePrefix = "add-pg-wal-volume-e2e"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithoutPgWal, env)
By(fmt.Sprintf("adding pg_wal volume in existing cluster: %v", clusterName), func() {
@@ -137,7 +140,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() {
AssertPVCCount(namespace, clusterName, expectedPvcCount, 120)
AssertClusterEventuallyReachesPhase(namespace, clusterName,
[]string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 30)
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
AssertClusterPhaseIsConsistent(namespace, clusterName, []string{apiv1.PhaseHealthy}, 30)
verifyPgWal(namespace)
})
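
The `updateWalStorage` helper above shows the general post-refactor pattern for mutating a Cluster: fetch it with `clusterutils.Get` inside `retry.RetryOnConflict`, then update. A condensed sketch under the same assumptions about the env type; the size value is illustrative.

package e2e

import (
	"k8s.io/client-go/util/retry"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
)

// addWalStorage patches WalStorage into an existing Cluster, retrying
// on optimistic-concurrency conflicts.
func addWalStorage(env *environment.TestingEnvironment, namespace, clusterName, storageClass string) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
		if err != nil {
			return err
		}
		cluster.Spec.WalStorage = &apiv1.StorageConfiguration{
			Size:         "1G", // illustrative size
			StorageClass: &storageClass,
		}
		return env.Client.Update(env.Ctx, cluster)
	})
}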
diff --git a/tests/e2e/pgbouncer_metrics_test.go b/tests/e2e/pgbouncer_metrics_test.go
index a33e6a7285..6fbf4d4fab 100644
--- a/tests/e2e/pgbouncer_metrics_test.go
+++ b/tests/e2e/pgbouncer_metrics_test.go
@@ -26,7 +26,8 @@ import (
pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -49,16 +50,16 @@ var _ = Describe("PGBouncer Metrics", Label(tests.LabelObservability), func() {
It("should retrieve the metrics exposed by a freshly created pooler of type pgBouncer and validate its content",
func() {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(cnpgCluster)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, cnpgCluster)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, cnpgCluster, env)
createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerBasicAuthRWSampleFile, 1)
- poolerName, err := env.GetResourceNameFromYAML(poolerBasicAuthRWSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerBasicAuthRWSampleFile)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
@@ -96,7 +97,7 @@ var _ = Describe("PGBouncer Metrics", Label(tests.LabelObservability), func() {
for _, pod := range podList.Items {
podName := pod.GetName()
- out, err := utils.RetrieveMetricsFromPgBouncer(env, pod)
+ out, err := proxy.RetrieveMetricsFromPgBouncer(env.Ctx, env.Interface, pod)
Expect(err).ToNot(HaveOccurred())
matches := metricsRegexp.FindAllString(out, -1)
Expect(matches).To(
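
Here `utils.RetrieveMetricsFromPgBouncer(env, pod)` becomes `proxy.RetrieveMetricsFromPgBouncer(env.Ctx, env.Interface, pod)`: only the clientset and context are needed. A sketch of scraping a pooler pod and matching a metric line; the pattern argument is illustrative.

package e2e

import (
	"regexp"

	corev1 "k8s.io/api/core/v1"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
)

// pgbouncerMetricMatches scrapes a PgBouncer pod's metrics endpoint and
// reports whether any part of the output matches the given pattern.
func pgbouncerMetricMatches(env *environment.TestingEnvironment, pod corev1.Pod, pattern string) (bool, error) {
	out, err := proxy.RetrieveMetricsFromPgBouncer(env.Ctx, env.Interface, pod)
	if err != nil {
		return false, err
	}
	re, err := regexp.Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchString(out), nil
}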
diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go
index d07734c106..edbd2560d9 100644
--- a/tests/e2e/pgbouncer_test.go
+++ b/tests/e2e/pgbouncer_test.go
@@ -22,6 +22,9 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -47,14 +50,14 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity),
Context("no user-defined certificates", Ordered, func() {
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace("pgbouncer-auth-no-user-certs")
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-auth-no-user-certs")
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
})
JustAfterEach(func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
DeleteTableUsingPgBouncerService(namespace, clusterName, poolerBasicAuthRWSampleFile, env, primaryPod)
})
@@ -155,9 +158,9 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity),
caSecNameClient = "my-postgresql-client-ca"
)
// Create a cluster in a namespace that will be deleted after the test
- namespace, err = env.CreateUniqueTestNamespace("pgbouncer-separate-certificates")
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-separate-certificates")
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(sampleFileWithCertificate)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithCertificate)
Expect(err).ToNot(HaveOccurred())
// Create certificates secret for server
@@ -192,7 +195,7 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity),
})
func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) {
- poolerKey, err := env.GetResourceNamespacedNameFromYAML(sampleFile)
+	poolerKey, err := yaml.GetResourceNamespacedNameFromYAML(env.Scheme, sampleFile)
if err != nil {
return nil, err
}
@@ -200,14 +203,17 @@ func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) {
Expect(err).ToNot(HaveOccurred())
var podList corev1.PodList
- err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(poolerKey.Namespace),
- ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey.Name})
+	err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(poolerKey.Namespace),
+		ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey.Name})
Expect(err).ToNot(HaveOccurred())
Expect(len(podList.Items)).Should(BeEquivalentTo(1))
return &podList.Items[0], nil
}
func runShowHelpInPod(pod *corev1.Pod) error {
- _, _, err := env.ExecCommand(env.Ctx, *pod, "pgbouncer", nil, "psql", "-c", "SHOW HELP")
+ _, _, err := exec.Command(
+ env.Ctx, env.Interface, env.RestClientConfig, *pod,
+ "pgbouncer", nil, "psql", "-c", "SHOW HELP",
+ )
return err
}
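
Unlike `exec.CommandInInstancePod`, the generic `exec.Command` targets an explicit pod and container and does not take the controller-runtime client, as `runShowHelpInPod` above shows in full. A small sketch of the same call shape, with the env-type assumption noted earlier.

package e2e

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
)

// psqlInContainer runs a psql one-liner inside a named container of the
// given pod and returns its stdout.
func psqlInContainer(env *environment.TestingEnvironment, pod corev1.Pod, container, statement string) (string, error) {
	stdout, _, err := exec.Command(
		env.Ctx, env.Interface, env.RestClientConfig, pod,
		container, nil, // nil timeout, as in the patch
		"psql", "-c", statement,
	)
	return stdout, err
}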
diff --git a/tests/e2e/pgbouncer_types_test.go b/tests/e2e/pgbouncer_types_test.go
index dbdfd51325..5be71b0e8b 100644
--- a/tests/e2e/pgbouncer_types_test.go
+++ b/tests/e2e/pgbouncer_types_test.go
@@ -24,7 +24,8 @@ import (
pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -53,9 +54,9 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
// This cluster will be shared by the next tests
- namespace, err = env.CreateUniqueTestNamespace("pgbouncer-types")
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-types")
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
})
@@ -74,7 +75,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
})
By("verify that read-only pooler pgbouncer.ini contains the correct host service", func() {
- poolerName, err := env.GetResourceNameFromYAML(poolerCertificateROSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateROSampleFile)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
@@ -89,7 +90,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
})
By("verify that read-write pooler pgbouncer.ini contains the correct host service", func() {
- poolerName, err := env.GetResourceNameFromYAML(poolerCertificateRWSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateRWSampleFile)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
@@ -105,7 +106,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
By(fmt.Sprintf("scaling PGBouncer to %v instances", instances), func() {
command := fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v",
poolerResourceNameRO, namespace, instances)
- _, _, err := utils.Run(command)
+ _, _, err := run.Run(command)
Expect(err).ToNot(HaveOccurred())
// verifying if PGBouncer pooler pods are ready after scale up
@@ -114,7 +115,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
// scale up command for 3 replicas for read write
command = fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v",
poolerResourceNameRW, namespace, instances)
- _, _, err = utils.Run(command)
+ _, _, err = run.Run(command)
Expect(err).ToNot(HaveOccurred())
// verifying if PGBouncer pooler pods are ready after scale up
@@ -126,7 +127,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
})
By("verifying that read-only pooler pgbouncer.ini contains the correct host service", func() {
- poolerName, err := env.GetResourceNameFromYAML(poolerCertificateROSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateROSampleFile)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
@@ -141,7 +142,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit
})
By("verifying that read-write pooler pgbouncer.ini contains the correct host service", func() {
- poolerName, err := env.GetResourceNameFromYAML(poolerCertificateRWSampleFile)
+ poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateRWSampleFile)
Expect(err).ToNot(HaveOccurred())
podList := &corev1.PodList{}
err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace),
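
Plain shell invocations move from `utils.Run` to the dedicated `run` package with an unchanged signature, as the pooler-scaling hunks above show. A sketch of that call:

package e2e

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
)

// scalePooler scales a Pooler via kubectl, as the type tests do.
func scalePooler(namespace, poolerName string, replicas int) error {
	cmd := fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v",
		poolerName, namespace, replicas)
	_, _, err := run.Run(cmd)
	return err
}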
diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go
index 9e7dae8567..c3858d9210 100644
--- a/tests/e2e/probes_test.go
+++ b/tests/e2e/probes_test.go
@@ -24,7 +24,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -81,7 +81,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() {
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "probes"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -119,7 +119,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() {
By("waiting for the cluster to restart", func() {
AssertClusterEventuallyReachesPhase(namespace, clusterName,
[]string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120)
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
})
By("checking the applied settings", func() {
@@ -161,7 +161,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() {
By("waiting for the cluster to restart", func() {
AssertClusterEventuallyReachesPhase(namespace, clusterName,
[]string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120)
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env)
})
By("checking the applied settings", func() {
diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go
index e6dccd6e66..ae1910fa29 100644
--- a/tests/e2e/publication_subscription_test.go
+++ b/tests/e2e/publication_subscription_test.go
@@ -24,7 +24,11 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -65,13 +69,13 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterManifest)
+ sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterManifest)
Expect(err).ToNot(HaveOccurred())
- destinationClusterName, err = env.GetResourceNameFromYAML(destinationClusterManifest)
+ destinationClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, destinationClusterManifest)
Expect(err).ToNot(HaveOccurred())
By("setting up source cluster", func() {
@@ -89,10 +93,14 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
// We need to make sure that publication/subscription have been removed before
// attempting to drop the database, otherwise the DROP DATABASE will fail because
// there's an active logical replication slot.
- destPrimaryPod, err := env.GetClusterPrimary(namespace, destinationClusterName)
+ destPrimaryPod, err := clusterutils.GetPrimary(
+ env.Ctx, env.Client,
+ namespace, destinationClusterName,
+ )
Expect(err).ToNot(HaveOccurred())
- _, _, err = env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: destPrimaryPod.Namespace,
PodName: destPrimaryPod.Name,
},
@@ -103,10 +111,14 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
)
Expect(err).ToNot(HaveOccurred())
- sourcePrimaryPod, err := env.GetClusterPrimary(namespace, sourceClusterName)
+ sourcePrimaryPod, err := clusterutils.GetPrimary(
+ env.Ctx, env.Client,
+ namespace, sourceClusterName,
+ )
Expect(err).ToNot(HaveOccurred())
- _, _, err = env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: sourcePrimaryPod.Namespace,
PodName: sourcePrimaryPod.Name,
},
@@ -119,15 +131,15 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
Expect(DeleteResourcesFromFile(namespace, destinationDatabaseManifest)).To(Succeed())
Expect(DeleteResourcesFromFile(namespace, sourceDatabaseManifest)).To(Succeed())
- Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, postgres.PostgresDBName,
databaseExistsQuery(dbname), "f"), 30).Should(Succeed())
- Eventually(QueryMatchExpectationPredicate(destPrimaryPod, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(destPrimaryPod, postgres.PostgresDBName,
databaseExistsQuery(dbname), "f"), 30).Should(Succeed())
})
assertCreateDatabase := func(namespace, clusterName, databaseManifest string) {
databaseObject := &apiv1.Database{}
- databaseObjectName, err := env.GetResourceNameFromYAML(databaseManifest)
+ databaseObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() {
@@ -148,17 +160,17 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
})
By(fmt.Sprintf("verifying the %s database has been created", databaseObject.Spec.Name), func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName,
+ Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName,
databaseExistsQuery(databaseObject.Spec.Name), "t"), 30).Should(Succeed())
})
}
// nolint:dupl
assertCreatePublication := func(namespace, clusterName, publicationManifest string) {
- pubObjectName, err := env.GetResourceNameFromYAML(publicationManifest)
+ pubObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, publicationManifest)
Expect(err).NotTo(HaveOccurred())
By("applying Publication CRD manifest", func() {
@@ -181,7 +193,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
})
By("verifying new publication has been created", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
@@ -191,7 +203,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
// nolint:dupl
assertCreateSubscription := func(namespace, clusterName, subscriptionManifest string) {
- subObjectName, err := env.GetResourceNameFromYAML(subscriptionManifest)
+ subObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, subscriptionManifest)
Expect(err).NotTo(HaveOccurred())
By("applying Subscription CRD manifest", func() {
@@ -214,7 +226,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
})
By("verifying new subscription has been created", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
@@ -237,8 +249,11 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
By("creating an empty table inside the destination database", func() {
query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName)
- _, err = testsUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname,
- apiv1.ApplicationUserSecretSuffix, query)
+ _, err = postgres.RunExecOverForward(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, destinationClusterName, dbname,
+ apiv1.ApplicationUserSecretSuffix, query,
+ )
Expect(err).ToNot(HaveOccurred())
})
@@ -257,14 +272,14 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
subscriptionReclaimPolicy = apiv1.SubscriptionReclaimRetain
}
// Get the object names
- pubObjectName, err := env.GetResourceNameFromYAML(pubManifest)
+ pubObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, pubManifest)
Expect(err).NotTo(HaveOccurred())
- subObjectName, err := env.GetResourceNameFromYAML(subManifest)
+ subObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, subManifest)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
- err = testsUtils.GetObject(
- env,
+ err = objects.Get(
+ env.Ctx, env.Client,
types.NamespacedName{Namespace: namespace, Name: pubObjectName},
&publication,
)
@@ -273,8 +288,8 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
err = env.Client.Update(env.Ctx, &publication)
g.Expect(err).ToNot(HaveOccurred())
- err = testsUtils.GetObject(
- env,
+ err = objects.Get(
+ env.Ctx, env.Client,
types.NamespacedName{Namespace: namespace, Name: subObjectName},
&subscription,
)
@@ -296,12 +311,12 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
})
By("removing the objects", func() {
- Expect(testsUtils.DeleteObject(env, &publication)).To(Succeed())
- Expect(testsUtils.DeleteObject(env, &subscription)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, &publication)).To(Succeed())
+ Expect(objects.Delete(env.Ctx, env.Client, &subscription)).To(Succeed())
})
By("verifying the publication reclaim policy outcome", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, sourceClusterName)
Expect(err).ToNot(HaveOccurred())
Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
@@ -309,7 +324,10 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub
})
By("verifying the subscription reclaim policy outcome", func() {
- primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName)
+ primaryPodInfo, err := clusterutils.GetPrimary(
+ env.Ctx, env.Client,
+ namespace, destinationClusterName,
+ )
Expect(err).ToNot(HaveOccurred())
Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname,
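
The generic object helpers follow the same move: `testsUtils.GetObject`/`DeleteObject` become `objects.Get`/`objects.Delete`, taking the context and client rather than the whole env. A sketch of the fetch-then-delete cycle used for publications and subscriptions above, assuming the helpers accept any `client.Object`:

package e2e

import (
	"k8s.io/apimachinery/pkg/types"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
)

// fetchThenDelete loads an object by namespaced name and removes it,
// mirroring the reclaim-policy test flow.
func fetchThenDelete(env *environment.TestingEnvironment, key types.NamespacedName, obj ctrlclient.Object) error {
	if err := objects.Get(env.Ctx, env.Client, key, obj); err != nil {
		return err
	}
	return objects.Delete(env.Ctx, env.Client, obj)
}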
diff --git a/tests/e2e/pvc_deletion_test.go b/tests/e2e/pvc_deletion_test.go
index ac5d7032bc..05c5747a26 100644
--- a/tests/e2e/pvc_deletion_test.go
+++ b/tests/e2e/pvc_deletion_test.go
@@ -25,7 +25,8 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -48,7 +49,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() {
It("correctly manages PVCs", func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -79,7 +80,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() {
quickDelete := &ctrlclient.DeleteOptions{
GracePeriodSeconds: &quickDeletionPeriod,
}
- err = env.DeletePod(namespace, podName, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, podName, quickDelete)
Expect(err).ToNot(HaveOccurred())
// The pod should be back
@@ -122,7 +123,10 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() {
originalPVCUID := pvc.GetUID()
// Check if walStorage is enabled
- walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env)
+ walStorageEnabled, err := storage.IsWalStorageEnabled(
+ env.Ctx, env.Client,
+ namespace, clusterName,
+ )
Expect(err).ToNot(HaveOccurred())
// Force delete setting
@@ -149,7 +153,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() {
}
// Deleting primary pod
- err = env.DeletePod(namespace, podName, quickDelete)
+ err = podutils.Delete(env.Ctx, env.Client, namespace, podName, quickDelete)
Expect(err).ToNot(HaveOccurred())
// A new pod should be created
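
Pod deletion moves to `podutils.Delete`, with the delete options passed through unchanged. A sketch of the quick-delete used by the self-healing tests above:

package e2e

import (
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
)

// quickDeletePod removes a pod with a zero grace period so the
// operator's self-healing reaction can be observed promptly.
func quickDeletePod(env *environment.TestingEnvironment, namespace, podName string) error {
	grace := int64(0)
	return podutils.Delete(env.Ctx, env.Client, namespace, podName,
		&ctrlclient.DeleteOptions{GracePeriodSeconds: &grace})
}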
diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go
index 97b94c9781..dd38fd09a0 100644
--- a/tests/e2e/replica_mode_cluster_test.go
+++ b/tests/e2e/replica_mode_cluster_test.go
@@ -35,8 +35,15 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -75,7 +82,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
testTableName = "replica_mode_tls_auth"
)
- replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix)
+ replicaNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(replicaNamespace, srcClusterName, srcClusterSample, env)
@@ -87,7 +94,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
testTableName,
)
- replicaName, err := env.GetResourceNameFromYAML(replicaClusterSampleTLS)
+ replicaName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleTLS)
Expect(err).ToNot(HaveOccurred())
assertReplicaClusterTopology(replicaNamespace, replicaName)
@@ -108,9 +115,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
testTableName = "replica_mode_basic_auth"
)
- replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleBasicAuth)
+ replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleBasicAuth)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(replicaNamespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, srcClusterName, srcClusterSample, env)
@@ -153,7 +160,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
return nil
}
- namespace, err = env.CreateUniqueTestNamespace("replica-promotion-demotion")
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "replica-promotion-demotion")
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterOneName, clusterOneFile, env)
@@ -167,26 +174,27 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
// turn the src cluster into a replica
By("setting replica mode on the src cluster", func() {
- cluster, err := env.GetCluster(namespace, clusterOneName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterOneName)
Expect(err).ToNot(HaveOccurred())
updateTime := time.Now().Truncate(time.Second)
cluster.Spec.ReplicaCluster.Enabled = ptr.To(true)
err = env.Client.Update(ctx, cluster)
Expect(err).ToNot(HaveOccurred())
Eventually(func(g Gomega) {
- cluster, err := env.GetCluster(namespace, clusterOneName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterOneName)
g.Expect(err).ToNot(HaveOccurred())
condition := getReplicaClusterSwitchCondition(cluster.Status.Conditions)
g.Expect(condition).ToNot(BeNil())
g.Expect(condition.Status).To(Equal(metav1.ConditionTrue))
g.Expect(condition.LastTransitionTime.Time).To(BeTemporally(">=", updateTime))
}).WithTimeout(30 * time.Second).Should(Succeed())
- AssertClusterIsReady(namespace, clusterOneName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterOneName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("checking that src cluster is now a replica cluster", func() {
Eventually(func() error {
- clusterOnePrimary, err = env.GetClusterPrimary(namespace, clusterOneName)
+ clusterOnePrimary, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ clusterOneName)
return err
}, 30, 3).Should(Succeed())
AssertPgRecoveryMode(clusterOnePrimary, true)
@@ -194,17 +202,18 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
// turn the dst cluster into a primary
By("disabling the replica mode on the dst cluster", func() {
- cluster, err := env.GetCluster(namespace, clusterTwoName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterTwoName)
Expect(err).ToNot(HaveOccurred())
cluster.Spec.ReplicaCluster.Enabled = ptr.To(false)
err = env.Client.Update(ctx, cluster)
Expect(err).ToNot(HaveOccurred())
- AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("checking that dst cluster has been promoted", func() {
Eventually(func() error {
- clusterTwoPrimary, err = env.GetClusterPrimary(namespace, clusterTwoName)
+ clusterTwoPrimary, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ clusterTwoName)
return err
}, 30, 3).Should(Succeed())
AssertPgRecoveryMode(clusterTwoPrimary, false)
@@ -225,8 +234,10 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
// We need to copy the password changes over to the src Cluster, which is now a Replica
// Cluster, in order to connect using the "-app" secret.
By("updating the appUser secret of the src cluster", func() {
- _, appSecretPassword, err := testUtils.GetCredentials(clusterTwoName, namespace,
- apiv1.ApplicationUserSecretSuffix, env)
+ _, appSecretPassword, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ clusterTwoName, namespace,
+ apiv1.ApplicationUserSecretSuffix)
Expect(err).ToNot(HaveOccurred())
AssertUpdateSecret("password", appSecretPassword, clusterOneName+apiv1.ApplicationUserSecretSuffix,
namespace, clusterOneName, 30, env)
@@ -252,18 +263,19 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
testTableName = "replica_mode_archive"
)
- replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample)
+ replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSample)
Expect(err).ToNot(HaveOccurred())
- replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix)
+ replicaNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
replicaNamespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -284,14 +296,20 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
)
// Get primary from replica cluster
- primaryReplicaCluster, err := env.GetClusterPrimary(replicaNamespace, replicaClusterName)
+ primaryReplicaCluster, err := clusterutils.GetPrimary(
+ env.Ctx,
+ env.Client,
+ replicaNamespace,
+ replicaClusterName,
+ )
Expect(err).ToNot(HaveOccurred())
By("verify archive mode is set to 'always on' designated primary", func() {
query := "show archive_mode;"
Eventually(func() (string, error) {
- stdOut, _, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ stdOut, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryReplicaCluster.Namespace,
PodName: primaryReplicaCluster.Name,
},
@@ -318,16 +336,17 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
BeforeAll(func() {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -338,7 +357,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
})
// Create the cluster
- clusterName, err = env.GetResourceNameFromYAML(clusterSample)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSample)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterSample, env)
})
@@ -351,13 +370,15 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
By("creating a backup and waiting until it's completed", func() {
backupName := fmt.Sprintf("%v-backup", clusterName)
- backup, err := testUtils.CreateOnDemandBackup(
+ backup, err := backups.CreateOnDemand(
+ env.Ctx,
+ env.Client,
namespace,
clusterName,
backupName,
apiv1.BackupTargetStandby,
apiv1.BackupMethodBarmanObjectStore,
- env)
+ )
Expect(err).ToNot(HaveOccurred())
Eventually(func() (apiv1.BackupPhase, error) {
@@ -366,7 +387,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
Name: backupName,
}, backup)
return backup.Status.Phase, err
- }, testTimeouts[testUtils.BackupIsReady]).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted))
+ }, testTimeouts[timeouts.BackupIsReady]).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted))
})
By("creating a replica cluster from the backup", func() {
@@ -404,13 +425,15 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
By("creating a snapshot and waiting until it's completed", func() {
var err error
snapshotName := fmt.Sprintf("%v-snapshot", clusterName)
- backup, err = testUtils.CreateOnDemandBackup(
+ backup, err = backups.CreateOnDemand(
+ env.Ctx,
+ env.Client,
namespace,
clusterName,
snapshotName,
apiv1.BackupTargetStandby,
apiv1.BackupMethodVolumeSnapshot,
- env)
+ )
Expect(err).ToNot(HaveOccurred())
Eventually(func(g Gomega) {
@@ -421,7 +444,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
g.Expect(err).ToNot(HaveOccurred())
g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2))
g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted))
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
})
By("fetching the volume snapshots", func() {
@@ -432,11 +455,11 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
Expect(err).ToNot(HaveOccurred())
Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements)))
- envVars := testUtils.EnvVarsForSnapshots{
+ envVars := storage.EnvVarsForSnapshots{
DataSnapshot: snapshotDataEnv,
WalSnapshot: snapshotWalEnv,
}
- err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars)
+ err = storage.SetSnapshotNameAsEnv(&snapshotList, backup, envVars)
Expect(err).ToNot(HaveOccurred())
})
@@ -479,11 +502,12 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
})
validateReplication := func(namespace, clusterAName, clusterBName string) {
- primary, err := env.GetClusterPrimary(namespace, clusterBName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName)
Expect(err).ToNot(HaveOccurred())
- _, _, err = env.ExecQueryInInstancePod(
- testUtils.PodLocator{Namespace: namespace, PodName: primary.Name},
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: namespace, PodName: primary.Name},
"postgres",
"CREATE TABLE test_replication AS SELECT 1;",
)
@@ -491,14 +515,15 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
_ = switchWalAndGetLatestArchive(namespace, primary.Name)
Eventually(func(g Gomega) {
- podListA, err := env.GetClusterPodList(namespace, clusterAName)
+ podListA, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterAName)
g.Expect(err).ToNot(HaveOccurred())
- podListB, err := env.GetClusterPodList(namespace, clusterBName)
+ podListB, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterBName)
g.Expect(err).ToNot(HaveOccurred())
for _, podA := range podListA.Items {
- _, _, err = env.ExecQueryInInstancePod(
- testUtils.PodLocator{Namespace: namespace, PodName: podA.Name},
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: namespace, PodName: podA.Name},
"postgres",
"SELECT * FROM test_replication;",
)
@@ -506,34 +531,36 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
}
for _, podB := range podListB.Items {
- _, _, err = env.ExecQueryInInstancePod(
- testUtils.PodLocator{Namespace: namespace, PodName: podB.Name},
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: namespace, PodName: podB.Name},
"postgres",
"SELECT * FROM test_replication;",
)
g.Expect(err).ToNot(HaveOccurred())
}
- }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed())
+ }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed())
}
waitForTimelineIncrease := func(namespace, clusterName string, expectedTimeline int) bool {
return Eventually(func(g Gomega) {
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
- stdout, _, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{Namespace: namespace, PodName: primary.Name},
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: namespace, PodName: primary.Name},
"postgres",
"SELECT timeline_id FROM pg_control_checkpoint();",
)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(strings.TrimSpace(stdout)).To(Equal(fmt.Sprintf("%d", expectedTimeline)))
- }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed())
+ }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed())
}
DescribeTable("should demote and promote the clusters correctly",
func(clusterAFile string, clusterBFile string, expectedTimeline int) {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() error {
// Since we reuse the same cluster names against the same minio instance, we need to clean it up
@@ -553,12 +580,13 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
DeferCleanup(func() { close(stopLoad) })
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -570,15 +598,16 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
By("creating the A cluster", func() {
var err error
- clusterAName, err = env.GetResourceNameFromYAML(clusterAFile)
+ clusterAName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterAFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterAName, clusterAFile, env)
})
By("creating some load on the A cluster", func() {
- primary, err := env.GetClusterPrimary(namespace, clusterAName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterAName)
Expect(err).ToNot(HaveOccurred())
- _, _, err = env.ExecQueryInInstancePod(
- testUtils.PodLocator{Namespace: namespace, PodName: primary.Name},
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: namespace, PodName: primary.Name},
"postgres",
"CREATE TABLE switchover_load (i int);",
)
@@ -586,8 +615,9 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
go func() {
for {
- _, _, _ = env.ExecQueryInInstancePod(
- testUtils.PodLocator{Namespace: namespace, PodName: primary.Name},
+ _, _, _ = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{Namespace: namespace, PodName: primary.Name},
"postgres",
"INSERT INTO switchover_load SELECT generate_series(1, 10000)",
)
@@ -603,7 +633,8 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
})
By("backing up the A cluster", func() {
- backup, err := testUtils.CreateBackup(
+ backup, err := backups.Create(
+ env.Ctx, env.Client,
apiv1.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -615,12 +646,11 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
Cluster: apiv1.LocalObjectReference{Name: clusterAName},
},
},
- env,
)
Expect(err).ToNot(HaveOccurred())
// Speed up backup finalization
- primary, err := env.GetClusterPrimary(namespace, clusterAName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterAName)
Expect(err).ToNot(HaveOccurred())
_ = switchWalAndGetLatestArchive(namespace, primary.Name)
Expect(err).ToNot(HaveOccurred())
@@ -633,25 +663,25 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
}, backup)
return backup.Status.Phase, err
},
- testTimeouts[testUtils.BackupIsReady],
+ testTimeouts[timeouts.BackupIsReady],
).WithPolling(10 * time.Second).
Should(BeEquivalentTo(apiv1.BackupPhaseCompleted))
})
By("creating the B cluster from the backup", func() {
var err error
- clusterBName, err = env.GetResourceNameFromYAML(clusterBFile)
+ clusterBName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterBFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterBName, clusterBFile, env)
})
By("demoting A to a replica", func() {
- cluster, err := env.GetCluster(namespace, clusterAName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterAName)
Expect(err).ToNot(HaveOccurred())
oldCluster := cluster.DeepCopy()
cluster.Spec.ReplicaCluster.Primary = clusterBName
Expect(env.Client.Patch(env.Ctx, cluster, k8client.MergeFrom(oldCluster))).To(Succeed())
- podList, err := env.GetClusterPodList(namespace, clusterAName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterAName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
AssertPgRecoveryMode(&pod, true)
@@ -660,7 +690,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
var token, invalidToken string
By("getting the demotion token", func() {
- cluster, err := env.GetCluster(namespace, clusterAName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterAName)
Expect(err).ToNot(HaveOccurred())
token = cluster.Status.DemotionToken
})
@@ -675,7 +705,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
})
By("promoting B with the invalid token", func() {
- cluster, err := env.GetCluster(namespace, clusterBName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName)
Expect(err).ToNot(HaveOccurred())
oldCluster := cluster.DeepCopy()
@@ -686,25 +716,26 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
By("failing to promote B with the invalid token", func() {
Consistently(func(g Gomega) {
- pod, err := env.GetClusterPrimary(namespace, clusterBName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName)
g.Expect(err).ToNot(HaveOccurred())
- stdOut, _, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ stdOut, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testUtils.PostgresDBName,
+ postgres.PostgresDBName,
"select pg_is_in_recovery();")
g.Expect(err).ToNot(HaveOccurred())
g.Expect(strings.Trim(stdOut, "\n")).To(Equal("t"))
}, 60, 10).Should(Succeed())
- cluster, err := env.GetCluster(namespace, clusterBName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.Status.Phase).To(BeEquivalentTo(apiv1.PhaseUnrecoverable))
})
By("promoting B with the right token", func() {
- cluster, err := env.GetCluster(namespace, clusterBName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName)
Expect(err).ToNot(HaveOccurred())
oldCluster := cluster.DeepCopy()
cluster.Spec.ReplicaCluster.PromotionToken = token
@@ -717,10 +748,10 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f
})
By("verifying B contains the primary", func() {
- primary, err := env.GetClusterPrimary(namespace, clusterBName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName)
Expect(err).ToNot(HaveOccurred())
AssertPgRecoveryMode(primary, false)
- podList, err := env.GetClusterReplicas(namespace, clusterBName)
+ podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterBName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
AssertPgRecoveryMode(&pod, true)
@@ -748,7 +779,7 @@ func assertReplicaClusterTopology(namespace, clusterName string) {
standbys []string
)
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.Status.ReadyInstances).To(BeEquivalentTo(cluster.Spec.Instances))
@@ -760,8 +791,9 @@ func assertReplicaClusterTopology(namespace, clusterName string) {
standbys = funk.FilterString(cluster.Status.InstanceNames, func(name string) bool { return name != primary })
getStreamingInfo := func(podName string) ([]string, error) {
- stdout, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ stdout, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: podName,
},
@@ -804,8 +836,9 @@ func assertReplicaClusterTopology(namespace, clusterName string) {
By("verifying that the new primary is streaming from the source cluster", func() {
Eventually(func(g Gomega) {
- stdout, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ stdout, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary,
},
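
Backup creation follows the same dependency-injection move: `testUtils.CreateOnDemandBackup(..., env)` becomes `backups.CreateOnDemand(env.Ctx, env.Client, ...)`. A sketch of requesting an on-demand object-store backup and polling it, suitable for `Eventually(...)` with `testTimeouts[timeouts.BackupIsReady]`; it assumes, as the polling in the patch suggests, that the helper returns the created Backup object.

package e2e

import (
	"k8s.io/apimachinery/pkg/types"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
)

// takeBackup requests an on-demand backup and returns a poll function
// that reports the backup's current phase.
func takeBackup(env *environment.TestingEnvironment, namespace, clusterName, backupName string) (func() (apiv1.BackupPhase, error), error) {
	backup, err := backups.CreateOnDemand(
		env.Ctx, env.Client,
		namespace, clusterName, backupName,
		apiv1.BackupTargetStandby,
		apiv1.BackupMethodBarmanObjectStore,
	)
	if err != nil {
		return nil, err
	}
	return func() (apiv1.BackupPhase, error) {
		err := env.Client.Get(env.Ctx,
			types.NamespacedName{Namespace: namespace, Name: backupName}, backup)
		return backup.Status.Phase, err
	}, nil
}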
diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go
index c57404f93f..e832a1fada 100644
--- a/tests/e2e/replication_slot_test.go
+++ b/tests/e2e/replication_slot_test.go
@@ -24,7 +24,10 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -47,17 +50,19 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
It("Can enable and disable replication slots", func() {
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
By("enabling replication slot on cluster", func() {
- err := testsUtils.ToggleHAReplicationSlots(namespace, clusterName, true, env)
+ err := replicationslot.ToggleHAReplicationSlots(
+ env.Ctx, env.Client,
+ namespace, clusterName, true)
Expect(err).ToNot(HaveOccurred())
// Replication slots should be Enabled
Consistently(func() (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return false, err
}
@@ -73,13 +78,13 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
}
By("checking Primary HA slots exist and are active", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(
+ expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod(
+ env.Ctx, env.Client,
namespace,
clusterName,
primaryPod.GetName(),
- env,
)
Expect(err).ToNot(HaveOccurred())
AssertReplicationSlotsOnPod(namespace, clusterName, *primaryPod, expectedSlots, true, false)
@@ -90,12 +95,15 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
var err error
before := time.Now()
Eventually(func(g Gomega) {
- replicaPods, err = env.GetClusterReplicas(namespace, clusterName)
+ replicaPods, err = clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName)
g.Expect(len(replicaPods.Items), err).To(BeEquivalentTo(2))
}, 90, 2).Should(Succeed())
GinkgoWriter.Println("standby slot check succeeded in", time.Since(before))
for _, pod := range replicaPods.Items {
- expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env)
+ expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod(
+ env.Ctx, env.Client,
+ namespace, clusterName, pod.GetName(),
+ )
Expect(err).ToNot(HaveOccurred())
AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false)
}
@@ -106,16 +114,17 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
})
By("creating a physical replication slots on the primary", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
query := fmt.Sprintf("SELECT pg_create_physical_replication_slot('%s');", userPhysicalSlot)
- _, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
Expect(err).ToNot(HaveOccurred())
})
@@ -125,7 +134,7 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
var err error
before := time.Now()
Eventually(func(g Gomega) {
- replicaPods, err = env.GetClusterReplicas(namespace, clusterName)
+ replicaPods, err = clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName)
g.Expect(len(replicaPods.Items), err).To(BeEquivalentTo(2))
}, 90, 2).Should(Succeed())
GinkgoWriter.Println("standby slot check succeeded in", time.Since(before))
@@ -136,14 +145,18 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
})
By("disabling replication slot from running cluster", func() {
- err := testsUtils.ToggleHAReplicationSlots(namespace, clusterName, false, env)
+ err := replicationslot.ToggleHAReplicationSlots(
+ env.Ctx, env.Client,
+ namespace, clusterName, false)
Expect(err).ToNot(HaveOccurred())
- err = testsUtils.ToggleSynchronizeReplicationSlots(namespace, clusterName, false, env)
+ err = replicationslot.ToggleSynchronizeReplicationSlots(
+ env.Ctx, env.Client,
+ namespace, clusterName, false)
Expect(err).ToNot(HaveOccurred())
// Replication slots should be Disabled
Consistently(func() (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return false, err
}
@@ -159,11 +172,13 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() {
}
By("verifying slots have been removed from the cluster's pods", func() {
- pods, err := env.GetClusterPodList(namespace, clusterName)
+ pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range pods.Items {
Eventually(func(g Gomega) error {
- slotOnPod, err := testsUtils.GetReplicationSlotsOnPod(namespace, pod.GetName(), env)
+ slotOnPod, err := replicationslot.GetReplicationSlotsOnPod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, pod.GetName(), postgres.AppDBName)
if err != nil {
return err
}
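In-pod execution follows the same shape: the locator struct moved from the monolithic testsUtils package to exec, and the four client dependencies (context, controller-runtime client, clientset, REST config) are now explicit arguments. A hedged sketch, with the wrapper invented for illustration and the argument order copied from the call sites above:

package sketch

import (
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

// queryInstance runs a SQL statement in a given instance pod, mirroring
// the exec.QueryInInstancePod call sites in this patch.
func queryInstance(env *environment.TestingEnvironment, namespace, podName, query string) (string, error) {
	stdout, _, err := exec.QueryInInstancePod(
		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
		exec.PodLocator{
			Namespace: namespace,
			PodName:   podName,
		},
		postgres.PostgresDBName,
		query)
	return stdout, err
}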
diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go
index 7b3bde3ae9..2440e0e299 100644
--- a/tests/e2e/rolling_update_test.go
+++ b/tests/e2e/rolling_update_test.go
@@ -33,6 +33,10 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -45,12 +49,12 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
Skip("Test depth is lower than the amount requested for this test")
}
})
- // gatherClusterInfo returns the current lists of pods, pod UIDs and pvc UIDs in a given cluster
+ // gatherClusterInfo returns the current lists of pods, pod UIDs and pvc UIDs in a given cluster
gatherClusterInfo := func(namespace string, clusterName string) ([]string, []types.UID, []types.UID, error) {
var podNames []string
var podUIDs []types.UID
var pvcUIDs []types.UID
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
podNames = append(podNames, pod.GetName())
@@ -73,7 +77,10 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
namespace string, clusterName string, imageName string, expectedInstances int, timeout int,
) {
Eventually(func() (int32, error) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
+ if err != nil {
+ return 0, err
+ }
updatedPods := int32(0)
for _, pod := range podList.Items {
// We need to check if a pod is ready, otherwise we
@@ -95,7 +102,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
}, timeout).Should(BeEquivalentTo(expectedInstances))
}
- // Verify that after an update all the pods are ready and running
+ // Verify that after an update all the pods are ready and running
// an updated image
AssertUpdateImage := func(namespace string, clusterName string) {
// TODO: the nodes are downloading the image sequentially,
@@ -113,7 +120,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
var cluster *apiv1.Cluster
Eventually(func(g Gomega) error {
var err error
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.ImageName = updatedImageName
@@ -123,15 +130,16 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// All the postgres containers should have the updated image
AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, timeout)
- // Setting up a cluster with three pods is slow, usually 200-600s
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ // Setting up a cluster with three pods is slow, usually 200-600s
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
}
// Verify that the pod name changes amount to an expected number
- AssertChangedNames := func(namespace string, clusterName string,
+ AssertChangedNames := func(
+ namespace string, clusterName string,
originalPodNames []string, expectedUnchangedNames int,
) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
matchingNames := 0
for _, pod := range podList.Items {
@@ -147,10 +155,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
}
// Verify that the pod UID changes amount to the expected number
- AssertNewPodsUID := func(namespace string, clusterName string,
+ AssertNewPodsUID := func(
+ namespace string, clusterName string,
originalPodUID []types.UID, expectedUnchangedUIDs int,
) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
matchingUID := 0
for _, pod := range podList.Items {
@@ -166,10 +175,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
}
// Verify that the PVC UID changes amount to the expected number
- AssertChangedPvcUID := func(namespace string, clusterName string,
+ AssertChangedPvcUID := func(
+ namespace string, clusterName string,
originalPVCUID []types.UID, expectedUnchangedPvcUIDs int,
) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
matchingPVC := 0
for _, pod := range podList.Items {
@@ -192,14 +202,15 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
}
// Verify that the -rw endpoint points to the expected primary
- AssertPrimary := func(namespace, clusterName string,
+ AssertPrimary := func(
+ namespace, clusterName string,
oldPrimaryPod *corev1.Pod, expectNewPrimaryIdx bool,
) {
var cluster *apiv1.Cluster
var err error
Eventually(func(g Gomega) {
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
if expectNewPrimaryIdx {
g.Expect(cluster.Status.CurrentPrimary).ToNot(BeEquivalentTo(oldPrimaryPod.Name))
@@ -209,7 +220,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
}, RetryTimeout).Should(Succeed())
// Get the new current primary Pod
- currentPrimaryPod, err := env.GetPod(namespace, cluster.Status.CurrentPrimary)
+ currentPrimaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, cluster.Status.CurrentPrimary)
Expect(err).ToNot(HaveOccurred())
endpointName := clusterName + "-rw"
@@ -226,8 +237,8 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
}, timeout).Should(BeEquivalentTo(currentPrimaryPod.Status.PodIP))
}
- // Verify that the IPs of the pods match the ones in the -r endpoint and
- // that the amount of pods is the expected one
+ // Verify that the IPs of the pods match the ones in the -r endpoint and
+ // that the number of pods is the expected one
AssertReadyEndpoint := func(namespace string, clusterName string, expectedEndpoints int) {
endpointName := clusterName + "-r"
endpoint := &corev1.Endpoints{}
@@ -238,7 +249,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
err := env.Client.Get(env.Ctx, endpointNamespacedName,
endpoint)
Expect(err).ToNot(HaveOccurred())
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(expectedEndpoints, err).To(BeEquivalentTo(len(podList.Items)))
matchingIP := 0
for _, pod := range podList.Items {
@@ -252,7 +263,8 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
Expect(matchingIP).To(BeEquivalentTo(expectedEndpoints))
}
- AssertRollingUpdate := func(namespace string, clusterName string,
+ AssertRollingUpdate := func(
+ namespace string, clusterName string,
sampleFile string, expectNewPrimaryIdx bool,
) {
var originalPodNames []string
@@ -262,12 +274,12 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
AssertCreateCluster(namespace, clusterName, sampleFile, env)
// Gather the number of instances in this Cluster
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
clusterInstances := cluster.Spec.Instances
// Gather the original primary Pod
- originalPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ originalPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("Gathering info on the current state", func() {
@@ -277,18 +289,18 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
By("updating the cluster definition", func() {
AssertUpdateImage(namespace, clusterName)
})
- // Since we're using a pvc, after the update the pods should
+ // Since we're using a pvc, after the update the pods should
// have been created with the same name using the same pvc.
// Here we check that the names we've saved at the beginning
- // of the It are the same names of the current pods.
- By("checking that the names of the pods have not changed", func() {
+ // of the It are the same names of the current pods.
+ By("checking that the names of the pods have not changed", func() {
AssertChangedNames(namespace, clusterName, originalPodNames, clusterInstances)
})
// Even if they have the same names, they should have different
- // UIDs, as the pods are new. Here we check that the UID
+ // UIDs, as the pods are new. Here we check that the UID
// we've saved at the beginning of the It don't match the
// current ones.
- By("checking that the pods are new ones", func() {
+ By("checking that the podutils are new ones", func() {
AssertNewPodsUID(namespace, clusterName, originalPodUID, 0)
})
// The PVCs get reused, so they should have the same UID
@@ -303,7 +315,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
By("having the current primary on the new TargetPrimary", func() {
AssertPrimary(namespace, clusterName, originalPrimaryPod, expectNewPrimaryIdx)
})
- // Check that the new pods are included in the endpoint
+ // Check that the new pods are included in the endpoint
By("having each pod included in the -r service", func() {
AssertReadyEndpoint(namespace, clusterName, clusterInstances)
})
@@ -409,15 +421,15 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
Expect(err).ToNot(HaveOccurred())
err = env.Client.Create(env.Ctx, cluster)
Expect(err).ToNot(HaveOccurred())
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
// Gather the number of instances in this Cluster
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
clusterInstances := cluster.Spec.Instances
// Gather the original primary Pod
- originalPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ originalPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("Gathering info on the current state", func() {
@@ -431,20 +443,20 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
Expect(err).ToNot(HaveOccurred())
})
AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, 900)
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
- // Since we're using a pvc, after the update the pods should
+ // Since we're using a pvc, after the update the pods should
// have been created with the same name using the same pvc.
// Here we check that the names we've saved at the beginning
- // of the It are the same names of the current pods.
- By("checking that the names of the pods have not changed", func() {
+ // of the It are the same names of the current pods.
+ By("checking that the names of the pods have not changed", func() {
AssertChangedNames(namespace, clusterName, originalPodNames, clusterInstances)
})
// Even if they have the same names, they should have different
- // UIDs, as the pods are new. Here we check that the UID
+ // UIDs, as the pods are new. Here we check that the UID
// we've saved at the beginning of the It don't match the
// current ones.
- By("checking that the pods are new ones", func() {
+ By("checking that the podutils are new ones", func() {
AssertNewPodsUID(namespace, clusterName, originalPodUID, 0)
})
// The PVCs get reused, so they should have the same UID
@@ -459,7 +471,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
By("having the current primary on the new TargetPrimary", func() {
AssertPrimary(namespace, clusterName, originalPrimaryPod, expectNewPrimaryIdx)
})
- // Check that the new pods are included in the endpoint
+ // Check that the new pods are included in the endpoint
By("having each pod included in the -r service", func() {
AssertReadyEndpoint(namespace, clusterName, clusterInstances)
})
@@ -477,9 +489,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// the image name has to be tagged as foo:MAJ.MIN. We'll update
// it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertRollingUpdate(namespace, clusterName, sampleFile, true)
})
@@ -496,9 +508,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// the image name has to be tagged as foo:MAJ.MIN. We'll update
// it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertRollingUpdate(namespace, clusterName, sampleFile, false)
})
@@ -510,9 +522,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
sampleFile = fixturesDir + "/rolling_updates/cluster-using-primary-update-method.yaml.template"
)
It("can do rolling update", func() {
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertRollingUpdate(namespace, clusterName, sampleFile, false)
})
@@ -555,7 +567,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// the image name has to be tagged as foo:MAJ.MIN. We'll update
// it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Create a new image catalog and a new cluster
@@ -575,7 +587,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// the image name has to be tagged as foo:MAJ.MIN. We'll update
// it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
catalog := newImageCatalog(namespace, clusterName, pgVersion.Major(), preRollingImg)
@@ -611,7 +623,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// the image name has to be tagged as foo:MAJ.MIN. We'll update
// it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 3, storageClass)
@@ -629,7 +641,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
// the image name has to be tagged as foo:MAJ.MIN. We'll update
// it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 1, storageClass)
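Two more helpers change shape in this file: namespace creation keeps its method form but now takes the context and client explicitly, and the manifest-name helper needs only the scheme rather than the whole environment. A sketch of the typical test setup under those assumptions (the wrapper itself is illustrative):

package sketch

import (
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
)

// setupClusterTest bundles the two calls that open almost every spec in
// this patch: create a unique namespace, then derive the cluster name
// from the sample manifest.
func setupClusterTest(env *environment.TestingEnvironment, namespacePrefix, sampleFile string) (string, string, error) {
	namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
	if err != nil {
		return "", "", err
	}
	clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
	return namespace, clusterName, err
}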
diff --git a/tests/e2e/scaling_test.go b/tests/e2e/scaling_test.go
index 000030e850..d47f86ed30 100644
--- a/tests/e2e/scaling_test.go
+++ b/tests/e2e/scaling_test.go
@@ -20,7 +20,7 @@ import (
"fmt"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -47,7 +47,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
const namespacePrefix = "cluster-scale-e2e-with-slots"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithReplicationSlots, env)
@@ -55,7 +55,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
// Add a node to the cluster and verify the cluster has one more
// element
By("adding an instance to the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 300
AssertClusterIsReady(namespace, clusterName, timeout, env)
@@ -66,7 +66,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
// Remove a node from the cluster and verify the cluster has one
// element less
By("removing an instance from the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 60
AssertClusterIsReady(namespace, clusterName, timeout, env)
@@ -84,14 +84,14 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "cluster-scale-e2e"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithoutReplicationSlots, env)
// Add a node to the cluster and verify the cluster has one more
// element
By("adding an instance to the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 300
AssertClusterIsReady(namespace, clusterName, timeout, env)
@@ -101,7 +101,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
// Remove a node from the cluster and verify the cluster has one
// element less
By("removing an instance from the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 60
AssertClusterIsReady(namespace, clusterName, timeout, env)
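The scaling test only swaps utils.Run for run.Run; the kubectl command string is unchanged. A sketch of the wrapped invocation (the helper is illustrative; the three return values of run.Run are taken from the call sites above):

package sketch

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
)

// scaleCluster shells out to kubectl exactly as the test above does;
// stdout and stderr are discarded and only the error is propagated.
func scaleCluster(namespace, clusterName string, replicas int) error {
	_, _, err := run.Run(fmt.Sprintf(
		"kubectl scale --replicas=%v -n %v cluster/%v",
		replicas, namespace, clusterName))
	return err
}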
diff --git a/tests/e2e/storage_expansion_test.go b/tests/e2e/storage_expansion_test.go
index 4713dde4c3..283a4383fd 100644
--- a/tests/e2e/storage_expansion_test.go
+++ b/tests/e2e/storage_expansion_test.go
@@ -21,7 +21,8 @@ import (
"os"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -53,7 +54,10 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() {
// Initializing namespace variable to be used in test case
namespacePrefix = "storage-expansion-true"
// Extracting bool value of AllowVolumeExpansion
- allowExpansion, err := utils.GetStorageAllowExpansion(defaultStorageClass, env)
+ allowExpansion, err := storage.GetStorageAllowExpansion(
+ env.Ctx, env.Client,
+ defaultStorageClass,
+ )
Expect(err).ToNot(HaveOccurred())
if (allowExpansion == nil) || (*allowExpansion == false) {
Skip(fmt.Sprintf("AllowedVolumeExpansion is false on %v", defaultStorageClass))
@@ -63,7 +67,7 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() {
It("expands PVCs via online resize", func() {
var err error
// Creating namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Creating a cluster with three nodes
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -76,7 +80,10 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() {
// Initializing namespace variable to be used in test case
namespacePrefix = "storage-expansion-false"
// Extracting bool value of AllowVolumeExpansion
- allowExpansion, err := utils.GetStorageAllowExpansion(defaultStorageClass, env)
+ allowExpansion, err := storage.GetStorageAllowExpansion(
+ env.Ctx, env.Client,
+ defaultStorageClass,
+ )
Expect(err).ToNot(HaveOccurred())
if (allowExpansion != nil) && (*allowExpansion == true) {
Skip(fmt.Sprintf("AllowedVolumeExpansion is true on %v", defaultStorageClass))
@@ -85,14 +92,14 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() {
It("expands PVCs via offline resize", func() {
var err error
// Creating namespace
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
By("update cluster for resizeInUseVolumes as false", func() {
// Update the cluster with 'resizeInUseVolumes' set to 'false' in storage.
// Check that the operator does not return an error
Eventually(func() error {
- _, _, err = utils.RunUnchecked("kubectl patch cluster " + clusterName + " -n " + namespace +
+ _, _, err = run.Unchecked("kubectl patch cluster " + clusterName + " -n " + namespace +
" -p '{\"spec\":{\"storage\":{\"resizeInUseVolumes\":false}}}' --type=merge")
if err != nil {
return err
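storage.GetStorageAllowExpansion returns a *bool, which is why both call sites above guard against a nil pointer before dereferencing. A sketch folding that guard into one predicate (the helper name is invented; the signature is as used above):

package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
)

// expansionAllowed treats a missing allowVolumeExpansion field on the
// StorageClass (nil pointer) the same as false, matching the Skip logic
// in the specs above.
func expansionAllowed(ctx context.Context, c client.Client, storageClass string) (bool, error) {
	allow, err := storage.GetStorageAllowExpansion(ctx, c, storageClass)
	if err != nil {
		return false, err
	}
	return allow != nil && *allow, nil
}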
diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go
index fa637fffce..46dbcf1491 100644
--- a/tests/e2e/suite_test.go
+++ b/tests/e2e/suite_test.go
@@ -35,9 +35,14 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
cnpgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/cloudvendors"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/sternmultitailer"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -45,19 +50,19 @@ import (
const (
fixturesDir = "./fixtures"
- RetryTimeout = utils.RetryTimeout
- PollingTime = utils.PollingTime
+ RetryTimeout = environment.RetryTimeout
+ PollingTime = objects.PollingTime
)
var (
- env *utils.TestingEnvironment
+ env *environment.TestingEnvironment
testLevelEnv *tests.TestEnvLevel
- testCloudVendorEnv *utils.TestEnvVendor
+ testCloudVendorEnv *cloudvendors.TestEnvVendor
expectedOperatorPodName string
operatorPodWasRenamed bool
operatorWasRestarted bool
quickDeletionPeriod = int64(1)
- testTimeouts map[utils.Timeout]int
+ testTimeouts map[timeouts.Timeout]int
minioEnv = &minio.Env{
Namespace: "minio",
ServiceName: "minio-service.minio",
@@ -68,21 +73,21 @@ var (
var _ = SynchronizedBeforeSuite(func() []byte {
var err error
- env, err = utils.NewTestingEnvironment()
+ env, err = environment.NewTestingEnvironment()
Expect(err).ShouldNot(HaveOccurred())
// Start stern to write the logs of every pod we are interested in. Since we don't have a way to have a selector
// matching both the operator's and the clusters' pods, we need to start stern twice.
sternClustersCtx, sternClusterCancel := context.WithCancel(env.Ctx)
sternClusterDoneChan := sternmultitailer.StreamLogs(sternClustersCtx, env.Interface, clusterPodsLabelSelector(),
- env.SternLogDir)
+ namespaces.SternLogDirectory)
DeferCleanup(func() {
sternClusterCancel()
<-sternClusterDoneChan
})
sternOperatorCtx, sternOperatorCancel := context.WithCancel(env.Ctx)
sternOperatorDoneChan := sternmultitailer.StreamLogs(sternOperatorCtx, env.Interface, operatorPodsLabelSelector(),
- env.SternLogDir)
+ namespaces.SternLogDirectory)
DeferCleanup(func() {
sternOperatorCancel()
<-sternOperatorDoneChan
@@ -92,13 +97,13 @@ var _ = SynchronizedBeforeSuite(func() []byte {
_ = appsv1.AddToScheme(env.Scheme)
// Set up a global MinIO service in its own namespace
- err = env.CreateNamespace(minioEnv.Namespace)
+ err = namespaces.CreateNamespace(env.Ctx, env.Client, minioEnv.Namespace)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() {
- err := env.DeleteNamespaceAndWait(minioEnv.Namespace, 300)
+ err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, minioEnv.Namespace, 300)
Expect(err).ToNot(HaveOccurred())
})
- minioEnv.Timeout = uint(testTimeouts[utils.MinioInstallation])
+ minioEnv.Timeout = uint(testTimeouts[timeouts.MinioInstallation])
minioClient, err := minio.Deploy(minioEnv, env)
Expect(err).ToNot(HaveOccurred())
@@ -118,7 +123,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
var err error
// We are creating a new testing env object again because the one above cannot be
// serialized and made accessible to all nodes (specs)
- if env, err = utils.NewTestingEnvironment(); err != nil {
+ if env, err = environment.NewTestingEnvironment(); err != nil {
panic(err)
}
@@ -129,11 +134,11 @@ var _ = SynchronizedBeforeSuite(func() []byte {
panic(err)
}
- if testTimeouts, err = utils.Timeouts(); err != nil {
+ if testTimeouts, err = timeouts.Timeouts(); err != nil {
panic(err)
}
- if testCloudVendorEnv, err = utils.TestCloudVendor(); err != nil {
+ if testCloudVendorEnv, err = cloudvendors.TestCloudVendor(); err != nil {
panic(err)
}
@@ -149,7 +154,7 @@ var _ = ReportAfterSuite("Gathering failed reports", func(report Report) {
// Keep the logs of the operator and the clusters in case of failure
// If everything is skipped, env has not been initialized, and we'll have nothing to clean up
if report.SuiteSucceeded && env != nil {
- err := fileutils.RemoveDirectory(env.SternLogDir)
+ err := fileutils.RemoveDirectory(namespaces.SternLogDirectory)
Expect(err).ToNot(HaveOccurred())
}
})
@@ -163,7 +168,7 @@ var _ = BeforeEach(func() {
return
}
- operatorPod, err := env.GetOperatorPod()
+ operatorPod, err := operator.GetPod(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
if operatorPodWasRenamed {
@@ -196,14 +201,14 @@ var _ = AfterEach(func() {
if len(breakingLabelsInCurrentTest.([]string)) != 0 {
return
}
- operatorPod, err := env.GetOperatorPod()
+ operatorPod, err := operator.GetPod(env.Ctx, env.Client)
Expect(err).ToNot(HaveOccurred())
- wasRenamed := utils.OperatorPodRenamed(operatorPod, expectedOperatorPodName)
+ wasRenamed := operator.PodRenamed(operatorPod, expectedOperatorPodName)
if wasRenamed {
operatorPodWasRenamed = true
Fail("operator was renamed")
}
- wasRestarted := utils.OperatorPodRestarted(operatorPod)
+ wasRestarted := operator.PodRestarted(operatorPod)
if wasRestarted {
operatorWasRestarted = true
Fail("operator was restarted")
diff --git a/tests/e2e/switchover_test.go b/tests/e2e/switchover_test.go
index 4801a8f5ff..dc773c16ed 100644
--- a/tests/e2e/switchover_test.go
+++ b/tests/e2e/switchover_test.go
@@ -18,6 +18,7 @@ package e2e
import (
"github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -41,9 +42,9 @@ var _ = Describe("Switchover", Serial, Label(tests.LabelSelfHealing), func() {
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "switchover-e2e-with-slots"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFileWithReplicationSlots)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithReplicationSlots)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithReplicationSlots, env)
@@ -57,9 +58,9 @@ var _ = Describe("Switchover", Serial, Label(tests.LabelSelfHealing), func() {
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "switchover-e2e"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFileWithoutReplicationSlots)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithoutReplicationSlots)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithoutReplicationSlots, env)
diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go
index fcd321874e..c6bc7dc3ef 100644
--- a/tests/e2e/syncreplicas_test.go
+++ b/tests/e2e/syncreplicas_test.go
@@ -27,7 +27,11 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -43,11 +47,12 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
getSyncReplicationCount := func(namespace, clusterName, syncState string, expectedCount int) {
Eventually(func() (int, error, error) {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- out, stdErr, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ out, stdErr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.GetName(),
},
@@ -63,11 +68,12 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
compareSynchronousStandbyNames := func(namespace, clusterName, element string) {
Eventually(func() string {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- out, stdErr, err := env.ExecQueryInInstancePod(
- utils.PodLocator{
+ out, stdErr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.GetName(),
},
@@ -88,11 +94,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
namespacePrefix = "legacy-sync-replicas-e2e"
sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica-legacy.yaml.template"
)
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -104,7 +110,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
By("checking that synchronous_standby_names reflects cluster's changes", func() {
// Set MaxSyncReplicas to 1
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.MaxSyncReplicas = 1
@@ -112,13 +118,13 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
}, RetryTimeout, 5).Should(Succeed())
// Scale the cluster down to 2 pods
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace,
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace,
clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 120
// Wait for pod 3 to be completely terminated
Eventually(func() (int, error) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
return len(podList.Items), err
}, timeout).Should(BeEquivalentTo(2))
@@ -127,14 +133,14 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
compareSynchronousStandbyNames(namespace, clusterName, "ANY 1")
})
By("failing when SyncReplicas fields are invalid", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Expect an error. MaxSyncReplicas must be lower than the number of instances
cluster.Spec.MaxSyncReplicas = 2
err = env.Client.Update(env.Ctx, cluster)
Expect(err).To(HaveOccurred())
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Expect an error. MinSyncReplicas must be lower than MaxSyncReplicas
cluster.Spec.MinSyncReplicas = 2
@@ -148,7 +154,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
namespacePrefix = "sync-replicas-statstatements"
sampleFile = fixturesDir + "/sync_replicas/cluster-pgstatstatements.yaml.template"
)
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
// Are extensions a problem with synchronous replication? No, absolutely not,
@@ -159,7 +165,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
// bootstrapping the cluster, the CREATE EXTENSION instruction will block
// the primary since the desired number of synchronous replicas (even when 1)
// is not met.
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -180,11 +186,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
namespacePrefix = "sync-replicas-e2e"
sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica.yaml.template"
)
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -195,7 +201,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
By("setting MaxStandbyNamesFromCluster to 1 and decreasing to 1 the sync replicas required", func() {
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = ptr.To(1)
cluster.Spec.PostgresConfiguration.Synchronous.Number = 1
@@ -208,7 +214,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
By("switching to MethodFirst (priority-based)", func() {
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.Synchronous.Method = apiv1.SynchronousReplicaConfigurationMethodFirst
return env.Client.Update(env.Ctx, cluster)
@@ -220,7 +226,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
By("by properly setting standbyNamesPre and standbyNamesPost", func() {
Eventually(func(g Gomega) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = nil
cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre = []string{"preSyncReplica"}
@@ -238,10 +244,10 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
namespacePrefix = "sync-replicas-preferred"
sampleFile = fixturesDir + "/sync_replicas/preferred.yaml.template"
)
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
@@ -251,31 +257,33 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() {
})
By("fencing a replica and verifying we have only 1 quorum-based replica", func() {
- Expect(utils.FencingOn(env, fmt.Sprintf("%v-3", clusterName),
- namespace, clusterName, utils.UsingAnnotation)).Should(Succeed())
+ Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName),
+ namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed())
getSyncReplicationCount(namespace, clusterName, "quorum", 1)
compareSynchronousStandbyNames(namespace, clusterName, "ANY 1")
})
By("fencing the second replica and verifying we unset synchronous_standby_names", func() {
- Expect(utils.FencingOn(env, fmt.Sprintf("%v-2", clusterName),
- namespace, clusterName, utils.UsingAnnotation)).Should(Succeed())
+ Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName),
+ namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed())
Eventually(func() string {
commandTimeout := time.Second * 10
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- stdout, _, err := env.ExecCommand(env.Ctx, *primary, specs.PostgresContainerName,
- &commandTimeout,
- "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names")
+ stdout, _, err := exec.Command(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ *primary, specs.PostgresContainerName, &commandTimeout,
+ "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names",
+ )
Expect(err).ToNot(HaveOccurred())
return strings.Trim(stdout, "\n")
}, 160).Should(BeEmpty())
})
By("unfenicing the replicas and verifying we have 2 quorum-based replicas", func() {
- Expect(utils.FencingOff(env, fmt.Sprintf("%v-3", clusterName),
- namespace, clusterName, utils.UsingAnnotation)).Should(Succeed())
- Expect(utils.FencingOff(env, fmt.Sprintf("%v-2", clusterName),
- namespace, clusterName, utils.UsingAnnotation)).Should(Succeed())
+ Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName),
+ namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed())
+ Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName),
+ namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed())
getSyncReplicationCount(namespace, clusterName, "quorum", 2)
compareSynchronousStandbyNames(namespace, clusterName, "ANY 2")
})
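Fencing moved the same way: utils.FencingOn and utils.FencingOff become fencing.On and fencing.Off with explicit context and client, still selecting the mechanism via fencing.UsingAnnotation. A sketch combining both directions (the instance-name convention of cluster name plus serial is taken from the spec above; the helper itself is illustrative):

package sketch

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing"
)

// setReplicaFencing fences or unfences instance <clusterName>-<serial>
// using the annotation-based method, as the sync-replica spec does.
func setReplicaFencing(ctx context.Context, c client.Client, namespace, clusterName string, serial int, fenced bool) error {
	instance := fmt.Sprintf("%v-%v", clusterName, serial)
	if fenced {
		return fencing.On(ctx, c, instance, namespace, clusterName, fencing.UsingAnnotation)
	}
	return fencing.Off(ctx, c, instance, namespace, clusterName, fencing.UsingAnnotation)
}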
diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go
index afbaa42c13..53c1dc3e62 100644
--- a/tests/e2e/tablespaces_test.go
+++ b/tests/e2e/tablespaces_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
+ "context"
"fmt"
"os"
"path"
@@ -32,11 +33,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -68,13 +79,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
clusterSetup := func(namespace, clusterManifest string) {
var err error
- clusterName, err = env.GetResourceNameFromYAML(clusterManifest)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest)
Expect(err).ToNot(HaveOccurred())
By("creating a cluster and having it be ready", func() {
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
})
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
}
@@ -90,17 +101,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
)
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// We create the MinIO credentials required to login into the system
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -114,28 +126,28 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
It("can verify tablespaces and PVC were created", func() {
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.Short])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.Short])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.Short])
- AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[testUtils.Short])
- AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[testUtils.Short])
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.Short])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.Short])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short])
+ AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[timeouts.Short])
+ AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[timeouts.Short])
AssertTablespaceAndOwnerExist(cluster, "atablespace", "app")
AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "dante")
})
It("can update the cluster by change the owner of tablesapce", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
updateTablespaceOwner(cluster, "anothertablespace", "alpha")
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
- AssertTablespaceReconciled(namespace, clusterName, "anothertablespace", testTimeouts[testUtils.Short])
+ AssertTablespaceReconciled(namespace, clusterName, "anothertablespace", testTimeouts[timeouts.Short])
AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha")
})
It("can update the cluster to set a tablespace as temporary", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("setting the first tablespace as temporary", func() {
@@ -158,13 +170,16 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
It("can create the backup and verify content in the object store", func() {
- backupName, err = env.GetResourceNameFromYAML(clusterBackupManifest)
+ backupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterBackupManifest)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() {
- testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady],
- env)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.Execute(
+ env.Ctx, env.Client, env.Scheme,
+ namespace, clusterBackupManifest, false,
+ testTimeouts[timeouts.BackupIsReady],
+ )
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
})
By("verifying the number of tars in minio", func() {
@@ -173,32 +188,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
By("verifying backup status", func() {
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
- Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return "", err
- }
- return cluster.Status.LastSuccessfulBackup, err
- }, 30).ShouldNot(BeEmpty())
- Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return "", err
- }
- return cluster.Status.LastFailedBackup, err
- }, 30).Should(BeEmpty())
})
})
It("can update the cluster adding a new tablespace and backup again", func() {
By("adding a new tablespace to the cluster", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
addTablespaces(cluster, []apiv1.TablespaceConfiguration{
@@ -214,26 +215,26 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
},
})
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
})
By("verifying there are 3 tablespaces and PVCs were created", func() {
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.Spec.Tablespaces).To(HaveLen(3))
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[testUtils.PodRollout])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout])
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[timeouts.PodRollout])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout])
AssertTablespaceAndOwnerExist(cluster, "atablespace", "app")
AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha")
AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante")
})
By("waiting for the cluster to be ready", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("verifying expected number of PVCs for tablespaces", func() {
@@ -242,14 +243,15 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
By("creating a new backup and verifying backup is ready", func() {
- backupCondition, err := testUtils.GetConditionsInClusterStatus(
+ backupCondition, err := backups.GetConditionsInClusterStatus(
+ env.Ctx,
+ env.Client,
namespace,
clusterName,
- env,
apiv1.ConditionBackup,
)
Expect(err).ShouldNot(HaveOccurred())
- _, stderr, err := testUtils.Run(
+ _, stderr, err := run.Run(
fmt.Sprintf("kubectl cnpg backup %s -n %s --backup-name %s",
clusterName, namespace, fullBackupName))
Expect(stderr).To(BeEmpty())
@@ -262,10 +264,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
)
// TODO: this is to force a CHECKPOINT when we run the backup on standby.
- // This should be better handled inside ExecuteBackup
+ // This should be better handled inside Execute
AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
})
By("verifying the number of tars in the latest base backup", func() {
@@ -278,21 +280,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
By("verifying backup status", func() {
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
return cluster.Status.LastSuccessfulBackup, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return "", err
}
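
Throughout this refactor, methods on the shared env object become package-level helpers that take the context and client explicitly. As orientation only, here is a minimal sketch of what clusterutils.Get presumably looks like, assuming a plain controller-runtime client; the real implementation ships in tests/utils/clusterutils in this series:

package clusterutils

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// Get fetches the Cluster identified by namespace/name using the given client.
func Get(
	ctx context.Context,
	crudClient client.Client,
	namespace, name string,
) (*apiv1.Cluster, error) {
	var cluster apiv1.Cluster
	err := crudClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cluster)
	if err != nil {
		return nil, err
	}
	return &cluster, nil
}
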
@@ -309,23 +311,23 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
const clusterRestoreFromBarmanManifest string = fixturesDir +
"/tablespaces/restore-cluster-from-barman.yaml.template"
- restoredClusterName, err := env.GetResourceNameFromYAML(clusterRestoreFromBarmanManifest)
+ restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterRestoreFromBarmanManifest)
Expect(err).ToNot(HaveOccurred())
By("creating the cluster to be restored through snapshot", func() {
CreateResourceFromFile(namespace, clusterRestoreFromBarmanManifest)
// A delay of 5 minutes when restoring with tablespaces is normal, so give it extra time
- AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReadySlow],
+ AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow],
env)
})
By("verifying that tablespaces and PVC were created", func() {
- restoredCluster, err := env.GetCluster(namespace, restoredClusterName)
+ restoredCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName)
Expect(err).ToNot(HaveOccurred())
AssertClusterHasMountPointsAndVolumesForTablespaces(restoredCluster, 3,
- testTimeouts[testUtils.Short])
- AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[testUtils.Short])
- AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[testUtils.Short])
+ testTimeouts[timeouts.Short])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short])
+ AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short])
AssertTablespaceAndOwnerExist(cluster, "atablespace", "app")
AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha")
AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante")
@@ -359,17 +361,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
BeforeAll(func() {
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// We create the required credentials for MinIO
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
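
secrets.CreateObjectStorageSecret likewise moves the context and client to the front of the parameter list. A hypothetical sketch of its body; the secret data keys (ID/KEY) are illustrative assumptions, not taken from this patch:

package secrets

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// CreateObjectStorageSecret creates the object-storage credentials secret
// (sketch only; the data key names are assumed for illustration).
func CreateObjectStorageSecret(
	ctx context.Context,
	crudClient client.Client,
	namespace, name, id, key string,
) (*corev1.Secret, error) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		StringData: map[string]string{"ID": id, "KEY": key},
	}
	if err := crudClient.Create(ctx, secret); err != nil {
		return nil, err
	}
	return secret, nil
}
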
@@ -383,30 +386,32 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
It("can verify tablespaces and PVC were created", func() {
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.Short])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.Short])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.Short])
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.Short])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.Short])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short])
})
It("can create the volume snapshot backup declaratively and verify the backup", func() {
- backupName, err = env.GetResourceNameFromYAML(clusterVolumesnapshoBackupManifest)
+ backupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterVolumesnapshoBackupManifest)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() {
- backupObject = testUtils.ExecuteBackup(
+ backupObject = backups.Execute(
+ env.Ctx,
+ env.Client,
+ env.Scheme,
namespace,
clusterVolumesnapshoBackupManifest,
false,
- testTimeouts[testUtils.VolumeSnapshotIsReady],
- env,
+ testTimeouts[timeouts.VolumeSnapshotIsReady],
)
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName)
})
By("checking that volumeSnapshots are properly labeled", func() {
Eventually(func(g Gomega) {
for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements {
- volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name)
+ volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, snapshot.Name)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName))
g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name))
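
backups.GetVolumeSnapshot, used in the label checks above, follows the same shape. A hypothetical sketch; the external-snapshotter client module version is an assumption:

package backups

import (
	"context"

	volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// GetVolumeSnapshot fetches a VolumeSnapshot by namespace and name.
func GetVolumeSnapshot(
	ctx context.Context,
	crudClient client.Client,
	namespace, name string,
) (*volumesnapshot.VolumeSnapshot, error) {
	var snapshot volumesnapshot.VolumeSnapshot
	err := crudClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &snapshot)
	if err != nil {
		return nil, err
	}
	return &snapshot, nil
}
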
@@ -423,7 +428,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
tl1 := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: table1,
Tablespace: tablespace1,
}
@@ -431,20 +436,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
tl2 := TableLocator{
Namespace: namespace,
ClusterName: clusterName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: table2,
Tablespace: tablespace2,
}
AssertCreateTestData(env, tl2)
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Execute a checkpoint
- _, _, err = env.EventuallyExecQueryInInstancePod(
- testUtils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
- }, testUtils.PostgresDBName,
+ }, postgres.PostgresDBName,
"CHECKPOINT",
RetryTimeout,
PollingTime,
@@ -454,7 +460,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
backupName = clusterName + pgTime.GetCurrentTimestampWithFormat("20060102150405")
By("creating a volumeSnapshot and waiting until it's completed", func() {
- err := testUtils.CreateOnDemandBackupViaKubectlPlugin(
+ err := backups.CreateOnDemandBackupViaKubectlPlugin(
namespace,
clusterName,
backupName,
@@ -468,7 +474,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
Eventually(func(g Gomega) {
- backupList, err := env.GetBackupList(namespace)
+ backupList, err := backups.List(env.Ctx, env.Client, namespace)
g.Expect(err).ToNot(HaveOccurred())
for _, backup := range backupList.Items {
if backup.Name != backupName {
@@ -480,13 +486,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
backup.Status.Error)
g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(4))
}
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
})
By("checking that volumeSnapshots are properly labeled", func() {
Eventually(func(g Gomega) {
for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements {
- volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name)
+ volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, snapshot.Name)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName))
g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name))
@@ -502,36 +508,38 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
err = os.Setenv("BACKUP_NAME", backupName)
Expect(err).ToNot(HaveOccurred())
- clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterVolumesnapshoRestoreManifest)
+ clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme,
+ clusterVolumesnapshoRestoreManifest)
Expect(err).ToNot(HaveOccurred())
By("creating the cluster to be restored through snapshot", func() {
CreateResourceFromFile(namespace, clusterVolumesnapshoRestoreManifest)
- AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow],
+ AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow],
env)
})
By("verifying that tablespaces and PVC were created", func() {
- restoredCluster, err := env.GetCluster(namespace, clusterToRestoreName)
+ restoredCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace,
+ clusterToRestoreName)
Expect(err).ToNot(HaveOccurred())
AssertClusterHasMountPointsAndVolumesForTablespaces(restoredCluster, 2,
- testTimeouts[testUtils.Short])
- AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[testUtils.Short])
- AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[testUtils.Short])
+ testTimeouts[timeouts.Short])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short])
+ AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short])
})
By("verifying the correct data exists in the restored cluster", func() {
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToRestoreName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: table1,
}
AssertDataExpectedCount(env, tableLocator, 2)
tableLocator = TableLocator{
Namespace: namespace,
ClusterName: clusterToRestoreName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: table2,
}
AssertDataExpectedCount(env, tableLocator, 2)
@@ -541,11 +549,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
It(fmt.Sprintf("can create the cluster by recovery from volume snapshot backup with pitr %v", backupName),
func() {
By("inserting test data and creating WALs on the cluster to be snapshotted", func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -566,7 +577,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
// including the newly created data within the recovery_target_time
time.Sleep(1 * time.Second)
// Get the recovery_target_time and pass it to the template engine
- recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+ recoveryTargetTime, err := postgres.GetCurrentTimestamp(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterName,
+ )
Expect(err).ToNot(HaveOccurred())
err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime)
Expect(err).ToNot(HaveOccurred())
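
For orientation, postgres.GetCurrentTimestamp presumably asks the primary for the server clock so that the test can use it as recovery_target_time. A standalone sketch built only from helpers this patch already introduces (the SQL text is an assumption):

// Sketch: derive recovery_target_time from the primary's clock.
primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
stdout, _, err := exec.QueryInInstancePod(
	env.Ctx, env.Client, env.Interface, env.RestClientConfig,
	exec.PodLocator{Namespace: primary.Namespace, PodName: primary.Name},
	postgres.PostgresDBName,
	"SELECT current_timestamp;",
)
Expect(err).ToNot(HaveOccurred())
recoveryTargetTime := strings.TrimSpace(stdout) // needs the strings import
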
@@ -586,45 +600,45 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
Expect(err).ToNot(HaveOccurred())
Expect(snapshotList.Items).To(HaveLen(len(backupObject.Status.BackupSnapshotStatus.Elements)))
- envVars := testUtils.EnvVarsForSnapshots{
+ envVars := storage.EnvVarsForSnapshots{
DataSnapshot: snapshotDataEnv,
WalSnapshot: snapshotWalEnv,
TablespaceSnapshotPrefix: snapshotTbsEnv,
}
- err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backupObject, envVars)
+ err = storage.SetSnapshotNameAsEnv(&snapshotList, backupObject, envVars)
Expect(err).ToNot(HaveOccurred())
})
- clusterToPITRName, err := env.GetResourceNameFromYAML(clusterVolumesnapshoPITRManifest)
+ clusterToPITRName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterVolumesnapshoPITRManifest)
Expect(err).ToNot(HaveOccurred())
By("creating the cluster to be restored through snapshot", func() {
CreateResourceFromFile(namespace, clusterVolumesnapshoPITRManifest)
- AssertClusterIsReady(namespace, clusterToPITRName, testTimeouts[testUtils.ClusterIsReadySlow],
+ AssertClusterIsReady(namespace, clusterToPITRName, testTimeouts[timeouts.ClusterIsReadySlow],
env)
})
By("can verify tablespaces and PVC were created", func() {
- recoveryCluster, err := env.GetCluster(namespace, clusterToPITRName)
+ recoveryCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterToPITRName)
Expect(err).ToNot(HaveOccurred())
AssertClusterHasMountPointsAndVolumesForTablespaces(recoveryCluster, 2,
- testTimeouts[testUtils.Short])
- AssertClusterHasPvcsAndDataDirsForTablespaces(recoveryCluster, testTimeouts[testUtils.Short])
- AssertDatabaseContainsTablespaces(recoveryCluster, testTimeouts[testUtils.Short])
+ testTimeouts[timeouts.Short])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(recoveryCluster, testTimeouts[timeouts.Short])
+ AssertDatabaseContainsTablespaces(recoveryCluster, testTimeouts[timeouts.Short])
})
By("verifying the correct data exists in the restored cluster", func() {
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToPITRName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: table1,
}
AssertDataExpectedCount(env, tableLocator, 4)
tableLocator = TableLocator{
Namespace: namespace,
ClusterName: clusterToPITRName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: table2,
}
AssertDataExpectedCount(env, tableLocator, 4)
@@ -638,14 +652,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
BeforeAll(func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
clusterSetup(namespace, clusterManifest)
})
It("can update cluster by adding tablespaces", func() {
By("adding tablespaces to the spec and patching", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeFalse())
@@ -664,30 +678,30 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
},
})
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
})
By("verify tablespaces and PVC were created", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout])
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout])
})
By("waiting for the cluster to be ready again", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
})
It("can hibernate via plugin a cluster with tablespaces", func() {
- assertCanHibernateClusterWithTablespaces(namespace, clusterName, testUtils.HibernateImperatively, 2)
+ assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateImperatively, 2)
})
It("can hibernate via annotation a cluster with tablespaces", func() {
- assertCanHibernateClusterWithTablespaces(namespace, clusterName, testUtils.HibernateDeclaratively, 6)
+ assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateDeclaratively, 6)
})
It("can fence a cluster with tablespaces using the plugin", func() {
@@ -696,13 +710,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
By("fencing the cluster", func() {
- err := testUtils.FencingOn(env, "*", namespace, clusterName, testUtils.UsingPlugin)
+ err := fencing.On(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin)
Expect(err).ToNot(HaveOccurred())
})
By("check all instances become not ready", func() {
Eventually(func() (bool, error) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return false, err
}
@@ -721,13 +735,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
By("un-fencing the cluster", func() {
- err := testUtils.FencingOff(env, "*", namespace, clusterName, testUtils.UsingPlugin)
+ err := fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin)
Expect(err).ToNot(HaveOccurred())
})
By("all instances become ready", func() {
Eventually(func() (bool, error) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return false, err
}
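
Condensed, the fence/unfence round trip exercised above reduces to the following, using the migrated fencing helpers ("*" targets every instance):

// Fence all instances through the kubectl-cnpg plugin, then lift the fence.
err := fencing.On(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin)
Expect(err).ToNot(HaveOccurred())
// ... instances report not-ready while fenced ...
err = fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin)
Expect(err).ToNot(HaveOccurred())
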
@@ -746,14 +760,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
By("verify tablespaces and PVC are there", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("verifying all PVCs for tablespaces are recreated", func() {
@@ -768,14 +782,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
BeforeAll(func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
clusterSetup(namespace, clusterManifest)
})
It("can update cluster adding tablespaces", func() {
By("patch cluster with primaryUpdateMethod=switchover", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeFalse())
@@ -785,10 +799,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
Expect(err).ToNot(HaveOccurred())
})
By("waiting for the cluster to be ready", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("adding tablespaces to the spec and patching", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeFalse())
@@ -810,21 +824,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
Expect(err).ToNot(HaveOccurred())
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
})
})
It("can verify tablespaces and PVC were created", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
})
})
@@ -855,7 +869,7 @@ func AssertTablespaceReconciled(
) {
By(fmt.Sprintf("checking if tablespace %v is in reconciled status", tablespaceName), func() {
Eventually(func(g Gomega) bool {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
for _, state := range cluster.Status.TablespacesStatus {
if state.State == apiv1.TablespaceStatusReconciled && state.Name == tablespaceName {
@@ -874,7 +888,7 @@ func AssertRoleReconciled(
) {
By(fmt.Sprintf("checking if role %v is in reconciled status", roleName), func() {
Eventually(func(g Gomega) bool {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
for state, names := range cluster.Status.ManagedRolesStatus.ByStatus {
if state == apiv1.RoleStatusReconciled {
@@ -911,7 +925,7 @@ func AssertClusterHasMountPointsAndVolumesForTablespaces(
Eventually(func(g Gomega) {
g.Expect(cluster.ContainsTablespaces()).To(BeTrue())
g.Expect(cluster.Spec.Tablespaces).To(HaveLen(numTablespaces))
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
g.Expect(pod.Spec.Containers).ToNot(BeEmpty())
@@ -967,7 +981,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo
clusterName := cluster.ObjectMeta.Name
By("checking all the required PVCs were created", func() {
Eventually(func(g Gomega) {
- pvcList, err := env.GetPVCList(namespace)
+ pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace)
g.Expect(err).ShouldNot(HaveOccurred())
var tablespacePvcNames []string
for _, pvc := range pvcList.Items {
@@ -987,7 +1001,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo
}
}
}
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
for _, tbsConfig := range cluster.Spec.Tablespaces {
@@ -999,13 +1013,14 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo
By("checking the data directory for the tablespaces is owned by postgres", func() {
Eventually(func(g Gomega) {
// MinIO may be in the same namespace as the cluster pods
- pvcList, err := env.GetClusterPodList(namespace, clusterName)
+ pvcList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ShouldNot(HaveOccurred())
for _, pod := range pvcList.Items {
for _, tbsConfig := range cluster.Spec.Tablespaces {
dataDir := fmt.Sprintf("/var/lib/postgresql/tablespaces/%s/data", tbsConfig.Name)
- owner, stdErr, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ owner, stdErr, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: pod.Name,
}, nil,
@@ -1030,17 +1045,18 @@ func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) {
clusterName := cluster.ObjectMeta.Name
By("checking the expected tablespaces are in the database", func() {
Eventually(func(g Gomega) {
- instances, err := env.GetClusterPodList(namespace, clusterName)
+ instances, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(err).ShouldNot(HaveOccurred())
var tbsListing string
for _, instance := range instances.Items {
var stdErr string
var err error
- tbsListing, stdErr, err = env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ tbsListing, stdErr, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: instance.Name,
- }, testUtils.AppDBName,
+ }, postgres.AppDBName,
"SELECT oid, spcname, pg_get_userbyid(spcowner) FROM pg_tablespace;",
)
g.Expect(stdErr).To(BeEmpty())
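
All the query helpers converge on one migrated shape: context, client, clientset and REST config first, then a PodLocator, the target database, and the SQL. A condensed usage sketch (podName stands in for any instance pod and is hypothetical):

stdout, stderr, err := exec.QueryInInstancePod(
	env.Ctx, env.Client, env.Interface, env.RestClientConfig,
	exec.PodLocator{
		Namespace: namespace,
		PodName:   podName, // hypothetical variable for illustration
	},
	postgres.AppDBName,
	"SELECT 1",
)
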
@@ -1059,16 +1075,17 @@ func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content st
clusterName := cluster.ObjectMeta.Name
By("checking the expected setting in a new PG session", func() {
Eventually(func(g Gomega) {
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
g.Expect(err).ShouldNot(HaveOccurred())
}
- settingValue, stdErr, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ settingValue, stdErr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary.Name,
- }, testUtils.AppDBName,
+ }, postgres.AppDBName,
"SHOW temp_tablespaces",
)
g.Expect(stdErr).To(BeEmpty())
@@ -1083,17 +1100,18 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace
namespace := cluster.ObjectMeta.Namespace
clusterName := cluster.ObjectMeta.Name
- primary, err := env.GetClusterPrimary(namespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
Expect(err).ShouldNot(HaveOccurred())
}
By("checking the temporary table is created into the temporary tablespace", func() {
- commandOutput, stdErr, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ commandOutput, stdErr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primary.Name,
- }, testUtils.AppDBName,
+ }, postgres.AppDBName,
"CREATE TEMPORARY TABLE cnp_e2e_test_table (i INTEGER); "+
"SELECT spcname FROM pg_tablespace WHERE OID="+
"(SELECT reltablespace FROM pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)",
@@ -1109,13 +1127,14 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace
func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner string) {
namespace := cluster.ObjectMeta.Namespace
clusterName := cluster.ObjectMeta.Name
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ShouldNot(HaveOccurred())
- result, stdErr, err := env.ExecQueryInInstancePod(
- testUtils.PodLocator{
+ result, stdErr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: primaryPod.Name,
- }, testUtils.AppDBName,
+ }, postgres.AppDBName,
fmt.Sprintf("SELECT 1 FROM pg_tablespace WHERE spcname = '%s' AND pg_get_userbyid(spcowner) = '%s';",
tablespace,
owner),
@@ -1129,7 +1148,7 @@ func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner str
func assertCanHibernateClusterWithTablespaces(
namespace string,
clusterName string,
- method testUtils.HibernationMethod,
+ method hibernationMethod,
keptPVCs int,
) {
By("verifying expected PVCs for tablespaces before hibernate", func() {
@@ -1137,13 +1156,13 @@ func assertCanHibernateClusterWithTablespaces(
})
By("hibernate the cluster", func() {
- err := testUtils.HibernateOn(env, namespace, clusterName, method)
+ err := hibernateOn(env.Ctx, env.Client, namespace, clusterName, method)
Expect(err).ToNot(HaveOccurred())
})
By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() {
Eventually(func(g Gomega) {
- podList, _ := env.GetClusterPodList(namespace, clusterName)
+ podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
g.Expect(podList.Items).Should(BeEmpty())
}, 300).Should(Succeed())
})
@@ -1153,22 +1172,22 @@ func assertCanHibernateClusterWithTablespaces(
})
By("hibernate off the cluster", func() {
- err := testUtils.HibernateOff(env, namespace, clusterName, method)
+ err := hibernateOff(env.Ctx, env.Client, namespace, clusterName, method)
Expect(err).ToNot(HaveOccurred())
})
By("waiting for the cluster to be ready", func() {
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env)
})
By("verify tablespaces and PVC are there", func() {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
Expect(cluster.ContainsTablespaces()).To(BeTrue())
- AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout])
- AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout])
- AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout])
+ AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout])
+ AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout])
+ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout])
})
By("verifying all PVCs for tablespaces are recreated", func() {
@@ -1179,7 +1198,7 @@ func assertCanHibernateClusterWithTablespaces(
func eventuallyHasExpectedNumberOfPVCs(pvcCount int, namespace string) {
By(fmt.Sprintf("checking cluster eventually has %d PVCs for tablespaces", pvcCount))
Eventually(func(g Gomega) {
- pvcList, err := env.GetPVCList(namespace)
+ pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace)
g.Expect(err).ShouldNot(HaveOccurred())
tbsPvc := 0
for _, pvc := range pvcList.Items {
@@ -1190,12 +1209,12 @@ func eventuallyHasExpectedNumberOfPVCs(pvcCount int, namespace string) {
tbsPvc++
}
g.Expect(tbsPvc).Should(Equal(pvcCount))
- }, testTimeouts[testUtils.ClusterIsReady]).Should(Succeed())
+ }, testTimeouts[timeouts.ClusterIsReady]).Should(Succeed())
}
func eventuallyHasCompletedBackups(namespace string, numBackups int) {
Eventually(func(g Gomega) {
- backups, err := env.GetBackupList(namespace)
+ backupList, err := backups.List(env.Ctx, env.Client, namespace)
- Expect(err).ShouldNot(HaveOccurred())
- Expect(backups.Items).To(HaveLen(numBackups))
+ g.Expect(err).ShouldNot(HaveOccurred())
+ g.Expect(backupList.Items).To(HaveLen(numBackups))
@@ -1252,3 +1271,78 @@ func getSnapshots(
return snapshotList, nil
}
+
+type hibernationMethod string
+
+const (
+ // hibernateDeclaratively selects the declarative method: hibernation is toggled via the cluster annotation
+ hibernateDeclaratively hibernationMethod = "annotation"
+ // hibernateImperatively selects the imperative method: hibernation is toggled via the kubectl-cnpg plugin
+ hibernateImperatively hibernationMethod = "plugin"
+)
+
+func hibernateOn(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace,
+ clusterName string,
+ method hibernationMethod,
+) error {
+ switch method {
+ case hibernateImperatively:
+ _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate on %v -n %v",
+ clusterName, namespace))
+ if err != nil {
+ return err
+ }
+ return nil
+ case hibernateDeclaratively:
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return err
+ }
+ if cluster.Annotations == nil {
+ cluster.Annotations = make(map[string]string)
+ }
+ originCluster := cluster.DeepCopy()
+ cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn
+
+ err = crudClient.Patch(ctx, cluster, client.MergeFrom(originCluster))
+ return err
+ default:
+ return fmt.Errorf("unknown method: %v", method)
+ }
+}
+
+func hibernateOff(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace,
+ clusterName string,
+ method hibernationMethod,
+) error {
+ switch method {
+ case hibernateImperatively:
+ _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate off %v -n %v",
+ clusterName, namespace))
+ if err != nil {
+ return err
+ }
+ return nil
+ case hibernateDeclaratively:
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return err
+ }
+ if cluster.Annotations == nil {
+ cluster.Annotations = make(map[string]string)
+ }
+ originCluster := cluster.DeepCopy()
+ cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOff
+
+ err = crudClient.Patch(ctx, cluster, client.MergeFrom(originCluster))
+ return err
+ default:
+ return fmt.Errorf("unknown method: %v", method)
+ }
+}
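
A usage sketch of the new helpers, mirroring what assertCanHibernateClusterWithTablespaces does above with the declarative method:

// Hibernate via the annotation, assert the pods disappear, then wake up.
err := hibernateOn(env.Ctx, env.Client, namespace, clusterName, hibernateDeclaratively)
Expect(err).ToNot(HaveOccurred())
// ... pods are removed and only the expected PVCs survive ...
err = hibernateOff(env.Ctx, env.Client, namespace, clusterName, hibernateDeclaratively)
Expect(err).ToNot(HaveOccurred())
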
diff --git a/tests/e2e/tolerations_test.go b/tests/e2e/tolerations_test.go
index c5862874cf..16f81c6d52 100644
--- a/tests/e2e/tolerations_test.go
+++ b/tests/e2e/tolerations_test.go
@@ -20,7 +20,8 @@ import (
"fmt"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -48,7 +49,7 @@ var _ = Describe("E2E Tolerations Node", Serial, Label(tests.LabelDisruptive, te
AfterEach(func() {
for _, node := range taintedNodes {
cmd := fmt.Sprintf("kubectl taint node %v %s=test:NoSchedule-", node, tolerationKey)
- _, _, err := utils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
}
taintedNodes = nil
@@ -57,16 +58,16 @@ var _ = Describe("E2E Tolerations Node", Serial, Label(tests.LabelDisruptive, te
It("can create a cluster with tolerations", func() {
var err error
// Initialize empty global namespace variable
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("tainting all the nodes", func() {
- nodes, _ := env.GetNodeList()
+ nodeList, err := nodes.List(env.Ctx, env.Client)
+ Expect(err).ToNot(HaveOccurred())
// We taint all the nodes where we could run the workloads
- for _, node := range nodes.Items {
+ for _, node := range nodeList.Items {
if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) {
cmd := fmt.Sprintf("kubectl taint node %v %s=test:NoSchedule", node.Name, tolerationKey)
- _, _, err := utils.Run(cmd)
+ _, _, err := run.Run(cmd)
Expect(err).ToNot(HaveOccurred())
taintedNodes = append(taintedNodes, node.Name)
}
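
run.Run replaces utils.Run one-for-one across these files. A hypothetical sketch of its shape, inferred purely from the call sites (stdout, stderr, error); the shell invocation is an assumption:

package run

import (
	"bytes"
	"os/exec"
)

// Run executes a shell command and returns its stdout, stderr, and error.
func Run(command string) (string, string, error) {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command("/bin/sh", "-c", command)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	return stdout.String(), stderr.String(), err
}
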
diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go
index b3824000c2..44d4784f70 100644
--- a/tests/e2e/update_user_test.go
+++ b/tests/e2e/update_user_test.go
@@ -27,7 +27,12 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -50,25 +55,25 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
It("can update the user application password", func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
appSecretName := clusterName + apiv1.ApplicationUserSecretSuffix
superUserSecretName := clusterName + apiv1.SuperUserSecretSuffix
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("update user application password", func() {
const newPassword = "eeh2Zahohx" //nolint:gosec
AssertUpdateSecret("password", newPassword, appSecretName, namespace, clusterName, 30, env)
- AssertConnection(namespace, rwService, testsUtils.AppDBName, testsUtils.AppUser, newPassword, env)
+ AssertConnection(namespace, rwService, postgres.AppDBName, postgres.AppUser, newPassword, env)
})
By("fail updating user application password with wrong user in secret", func() {
@@ -79,21 +84,21 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
AssertUpdateSecret("username", newUser, appSecretName, namespace, clusterName, 30, env)
timeout := time.Second * 10
- dsn := testsUtils.CreateDSN(rwService, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432)
+ dsn := services.CreateDSN(rwService, newUser, postgres.AppDBName, newPassword, services.Require, 5432)
- _, _, err := env.ExecCommand(env.Ctx, *primaryPod,
+ _, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, *primaryPod,
specs.PostgresContainerName, &timeout,
"psql", dsn, "-tAc", "SELECT 1")
Expect(err).To(HaveOccurred())
// Revert the username change
- AssertUpdateSecret("username", testsUtils.AppUser, appSecretName, namespace, clusterName, 30, env)
+ AssertUpdateSecret("username", postgres.AppUser, appSecretName, namespace, clusterName, 30, env)
})
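
services.CreateDSN builds the libpq connection string handed to psql above. A hypothetical sketch, assuming services.Require maps to sslmode=require:

package services

import "fmt"

// SSLMode mirrors libpq's sslmode values.
type SSLMode string

// Require corresponds to sslmode=require.
const Require SSLMode = "require"

// CreateDSN assembles a key/value libpq DSN (sketch only; the real helper
// lives in tests/utils/services).
func CreateDSN(host, user, dbname, password string, sslMode SSLMode, port int) string {
	return fmt.Sprintf(
		"host=%s port=%d user=%s dbname=%s password=%s sslmode=%s",
		host, port, user, dbname, password, sslMode,
	)
}
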
By("update superuser password", func() {
// Setting EnableSuperuserAccess to true
Eventually(func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
cluster.Spec.EnableSuperuserAccess = ptr.To(true)
return env.Client.Update(env.Ctx, cluster)
@@ -112,7 +117,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC
const newPassword = "fi6uCae7" //nolint:gosec
AssertUpdateSecret("password", newPassword, superUserSecretName, namespace, clusterName, 30, env)
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName, testsUtils.PostgresUser, newPassword, env)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, postgres.PostgresUser, newPassword, env)
})
})
})
@@ -134,13 +139,13 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
It("enable and disable superuser access", func() {
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFile, env)
- rwService := testsUtils.GetReadWriteServiceName(clusterName)
+ rwService := services.GetReadWriteServiceName(clusterName)
secretName := clusterName + apiv1.SuperUserSecretSuffix
var secret corev1.Secret
@@ -149,7 +154,7 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
Name: secretName,
}
- primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
By("ensure superuser access is disabled by default", func() {
@@ -162,12 +167,13 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
query := "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'"
// We should have the `postgres` user with a null password
Eventually(func() string {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primaryPod.Namespace,
PodName: primaryPod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
query)
if err != nil {
return ""
@@ -179,7 +185,7 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
By("enable superuser access", func() {
// Setting EnableSuperuserAccess to true
Eventually(func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
cluster.Spec.EnableSuperuserAccess = ptr.To(true)
return env.Client.Update(env.Ctx, cluster)
@@ -191,16 +197,18 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi
g.Expect(err).ToNot(HaveOccurred())
}, 90).WithPolling(time.Second).Should(Succeed())
- superUser, superUserPass, err := testsUtils.GetCredentials(clusterName, namespace,
- apiv1.SuperUserSecretSuffix, env)
+ superUser, superUserPass, err := secrets.GetCredentials(
+ env.Ctx, env.Client,
+ clusterName, namespace, apiv1.SuperUserSecretSuffix,
+ )
Expect(err).ToNot(HaveOccurred())
- AssertConnection(namespace, rwService, testsUtils.PostgresDBName, superUser, superUserPass, env)
+ AssertConnection(namespace, rwService, postgres.PostgresDBName, superUser, superUserPass, env)
})
By("disable superuser access", func() {
// Setting EnableSuperuserAccess to false
Eventually(func() error {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
Expect(err).NotTo(HaveOccurred())
cluster.Spec.EnableSuperuserAccess = ptr.To(false)
return env.Client.Update(env.Ctx, cluster)
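
secrets.GetCredentials resolves the username/password pair from the cluster's credential secret. A hypothetical sketch, assuming the kubernetes.io/basic-auth key names:

package secrets

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// GetCredentials reads the credentials stored in "<clusterName><secretSuffix>".
func GetCredentials(
	ctx context.Context,
	crudClient client.Client,
	clusterName, namespace, secretSuffix string,
) (string, string, error) {
	var secret corev1.Secret
	key := client.ObjectKey{Namespace: namespace, Name: clusterName + secretSuffix}
	if err := crudClient.Get(ctx, key, &secret); err != nil {
		return "", "", err
	}
	// "username"/"password" are the basic-auth key names; assumed here.
	return string(secret.Data["username"]), string(secret.Data["password"]), nil
}
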
diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go
index f698d29c6f..c96704c38c 100644
--- a/tests/e2e/upgrade_test.go
+++ b/tests/e2e/upgrade_test.go
@@ -36,8 +36,16 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -120,14 +128,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// Since the 'cnpg-system' namespace is deleted after each spec is completed,
// we need to recreate it and then create the image pull secret
- err := env.EnsureNamespace(operatorNamespace)
+ err := namespaces.EnsureNamespace(env.Ctx, env.Client, operatorNamespace)
Expect(err).NotTo(HaveOccurred())
dockerServer := os.Getenv("DOCKER_SERVER")
dockerUsername := os.Getenv("DOCKER_USERNAME")
dockerPassword := os.Getenv("DOCKER_PASSWORD")
if dockerServer != "" && dockerUsername != "" && dockerPassword != "" {
- _, _, err := testsUtils.Run(fmt.Sprintf(`kubectl -n %v create secret docker-registry
+ _, _, err := run.Run(fmt.Sprintf(`kubectl -n %v create secret docker-registry
cnpg-pull-secret
--docker-server="%v"
--docker-username="%v"
@@ -170,10 +178,10 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
databaseName := "appdb"
By("checking basic functionality performing a configuration upgrade on the cluster", func() {
- podList, err := env.GetClusterPodList(upgradeNamespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName)
Expect(err).ToNot(HaveOccurred())
// Gather current primary
- cluster, err := env.GetCluster(upgradeNamespace, clusterName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName)
Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary))
oldPrimary := cluster.Status.CurrentPrimary
@@ -191,12 +199,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// Check that both parameters have been modified in each pod
for _, pod := range podList.Items {
Eventually(func() (int, error) {
- stdout, stderr, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, stderr, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
"show max_replication_slots")
if err != nil {
return 0, err
@@ -210,12 +219,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
"Pod %v should have updated its config", pod.Name)
Eventually(func() (int, error, error) {
- stdout, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.PostgresDBName,
+ postgres.PostgresDBName,
"show maintenance_work_mem")
value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n"))
return value, err, atoiErr
@@ -224,7 +234,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
}
// Check that a switchover happened
Eventually(func() (bool, error) {
- c, err := env.GetCluster(upgradeNamespace, clusterName)
+ c, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName)
Expect(err).ToNot(HaveOccurred())
GinkgoWriter.Printf("Current Primary: %s, Current Primary timestamp: %s\n",
@@ -243,15 +253,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
By("verifying that all the standbys streams from the primary", func() {
// To check this we find the primary and create a table on it.
// The table should be replicated on the standbys.
- primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, upgradeNamespace, clusterName)
Expect(err).ToNot(HaveOccurred())
query := "CREATE TABLE IF NOT EXISTS postswitch(i int);"
- _, _, err = env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primary.Namespace,
PodName: primary.Name,
- }, testsUtils.DatabaseName(databaseName),
+ }, exec.DatabaseName(databaseName),
query,
RetryTimeout,
PollingTime,
@@ -270,12 +281,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
return "", err
}
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- testsUtils.DatabaseName(databaseName),
+ exec.DatabaseName(databaseName),
"SELECT count(*) = 0 FROM postswitch")
return strings.TrimSpace(out), err
}, 240).Should(BeEquivalentTo("t"),
@@ -286,12 +298,12 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// getExecutableHashesFromInstances prints the manager's executable hash of each pod to a given IO writer
getExecutableHashesFromInstances := func(upgradeNamespace, clusterName string, w io.Writer) error {
- pods, err := env.GetClusterPodList(upgradeNamespace, clusterName)
+ pods, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName)
if err != nil {
return err
}
for _, pod := range pods.Items {
- status, err := testsUtils.RetrievePgStatusFromInstance(env, pod, true)
+ status, err := proxy.RetrievePgStatusFromInstance(env.Ctx, env.Interface, pod, true)
if err != nil {
continue
}
@@ -367,7 +379,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
}
err := retry.OnError(backoffCheckingPodRestarts, shouldRetry, func() error {
var currentUIDs []types.UID
- currentPodList, err := env.GetClusterPodList(namespace, clusterName)
+ currentPodList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
if err != nil {
return err
}
@@ -388,15 +400,20 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
GinkgoWriter.Println("cleaning up")
if CurrentSpecReport().Failed() {
// Dump the minio namespace when failed
- env.DumpNamespaceObjects(minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log")
+ namespaces.DumpNamespaceObjects(
+ env.Ctx, env.Client,
+ minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log",
+ )
// Dump the operator namespace, as the operator is changing too
- env.DumpOperator(operatorNamespace,
+ operator.Dump(
+ env.Ctx, env.Client,
+ operatorNamespace,
"out/"+CurrentSpecReport().LeafNodeText+"operator.log")
}
// Delete the operator's namespace in case the previous test made corrupting
// changes to it that would affect subsequent tests
- if err := env.DeleteNamespaceAndWait(operatorNamespace, 60); err != nil {
+ if err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, operatorNamespace, 60); err != nil {
return fmt.Errorf("could not cleanup, failed to delete operator namespace: %v", err)
}
@@ -419,7 +436,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
namespacePrefix), func() {
var err error
// Create an upgradeNamespace for all the resources
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Creating an upgradeNamespace should be quick
@@ -440,18 +457,18 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
deployOperator := func(operatorManifestFile string) {
By(fmt.Sprintf("applying manager manifest %s", operatorManifestFile), func() {
// Upgrade to the new version
- _, stderr, err := testsUtils.Run(
+ _, stderr, err := run.Run(
fmt.Sprintf("kubectl apply --server-side --force-conflicts -f %v", operatorManifestFile))
Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr)
})
By("waiting for the deployment to be rolled out", func() {
- deployment, err := env.GetOperatorDeployment()
+ deployment, err := operator.GetDeployment(env.Ctx, env.Client)
Expect(err).NotTo(HaveOccurred())
timeout := 240
Eventually(func() error {
- _, stderr, err := testsUtils.Run(fmt.Sprintf(
+ _, stderr, err := run.Run(fmt.Sprintf(
"kubectl -n %v rollout status deployment %v -w --timeout=%vs",
operatorNamespace,
deployment.Name,
@@ -465,7 +482,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
}, timeout).ShouldNot(HaveOccurred())
})
By("getting the operator info", func() {
- pod, err := env.GetOperatorPod()
+ pod, err := operator.GetPod(env.Ctx, env.Client)
Expect(err).NotTo(HaveOccurred())
GinkgoWriter.Println("image used for operator", pod.Spec.Containers[0].Image)
})
@@ -481,12 +498,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
CreateResourceFromFile(upgradeNamespace, pgSecrets)
})
By("creating the cloud storage credentials", func() {
- _, err := testsUtils.CreateObjectStorageSecret(
+ _, err := secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
upgradeNamespace,
"aws-creds",
"minio",
"minio123",
- env,
)
Expect(err).NotTo(HaveOccurred())
})
@@ -516,7 +534,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// By doing that we no longer verify that the online upgrade avoids
// any Pod restart, but we still verify that the operator
// is upgraded in this case too.
- _, stderr, err := testsUtils.Run(
+ _, stderr, err := run.Run(
fmt.Sprintf("kubectl annotate -n %s cluster/%s cnpg.io/reconcilePodSpec=disabled",
upgradeNamespace, clusterName1))
Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr)
@@ -525,7 +543,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// Cluster ready happens after minio is ready
By("having a Cluster with three instances ready", func() {
- AssertClusterIsReady(upgradeNamespace, clusterName1, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(upgradeNamespace, clusterName1, testTimeouts[timeouts.ClusterIsReady], env)
})
By("creating a Pooler with two instances", func() {
@@ -535,15 +553,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// Now that everything is in place, we add a bit of data we'll use to
// check if the backup is working
By("creating data on the database", func() {
- primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName1)
+ primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, upgradeNamespace, clusterName1)
Expect(err).ToNot(HaveOccurred())
query := "CREATE TABLE IF NOT EXISTS to_restore AS VALUES (1),(2);"
- _, _, err = env.EventuallyExecQueryInInstancePod(
- testsUtils.PodLocator{
+ _, _, err = exec.EventuallyExecQueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: primary.Namespace,
PodName: primary.Name,
- }, testsUtils.DatabaseName(databaseName),
+ }, exec.DatabaseName(databaseName),
query,
RetryTimeout,
PollingTime,
@@ -571,8 +590,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// A file called data.tar.gz should be available on minio
Eventually(func() (int, error, error) {
- out, _, err := env.ExecCommandInContainer(
- testsUtils.ContainerLocator{
+ out, _, err := exec.CommandInContainer(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.ContainerLocator{
Namespace: minioEnv.Namespace,
PodName: minioEnv.Client.Name,
ContainerName: "mc",
@@ -592,7 +612,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
assertPGBouncerPodsAreReady(upgradeNamespace, pgBouncerSampleFile, 2)
var podUIDs []types.UID
- podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
podUIDs = append(podUIDs, pod.GetUID())
@@ -641,7 +661,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// the instance pods should not restart
By("verifying that the instance pods are not restarted", func() {
- podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
Expect(pod.Status.ContainerStatuses).NotTo(BeEmpty())
@@ -656,7 +676,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
err := os.Setenv("SERVER_NAME", serverName2)
Expect(err).ToNot(HaveOccurred())
CreateResourceFromFile(upgradeNamespace, sampleFile2)
- AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[testsUtils.ClusterIsReady], env)
+ AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[timeouts.ClusterIsReady], env)
})
AssertConfUpgrade(clusterName2, upgradeNamespace)
@@ -666,17 +686,18 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
By("restoring the backup taken from the first Cluster in a new cluster", func() {
restoredClusterName := "cluster-restore"
CreateResourceFromFile(upgradeNamespace, restoreFile)
- AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow],
+ AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow],
env)
// Test data should be present on restored primary
primary := restoredClusterName + "-1"
- out, _, err := env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err := exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: upgradeNamespace,
PodName: primary,
},
- testsUtils.DatabaseName(databaseName),
+ exec.DatabaseName(databaseName),
"SELECT count(*) FROM to_restore")
Expect(strings.Trim(out, "\n"), err).To(BeEquivalentTo("2"))
@@ -684,12 +705,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// we expect a promotion. We can't enforce "2" because the timeline
// ID will also depend on the history files existing in the cloud
// storage and we don't know the status of that.
- out, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: upgradeNamespace,
PodName: primary,
},
- testsUtils.DatabaseName(databaseName),
+ exec.DatabaseName(databaseName),
"select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)")
Expect(err).NotTo(HaveOccurred())
Expect(strconv.Atoi(strings.Trim(out, "\n"))).To(
@@ -697,12 +719,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// Restored standbys should soon attach themselves to restored primary
Eventually(func() (string, error) {
- out, _, err = env.ExecQueryInInstancePod(
- testsUtils.PodLocator{
+ out, _, err = exec.QueryInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: upgradeNamespace,
PodName: primary,
},
- testsUtils.DatabaseName(databaseName),
+ exec.DatabaseName(databaseName),
"SELECT count(*) FROM pg_stat_replication")
return strings.Trim(out, "\n"), err
}, 180).Should(BeEquivalentTo("2"))
@@ -756,13 +779,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
It("keeps clusters working after a rolling upgrade", func() {
upgradeNamespacePrefix := rollingUpgradeNamespace
By("applying environment changes for current upgrade to be performed", func() {
- testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, false, env)
+ operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, false)
})
- mostRecentTag, err := testsUtils.GetMostRecentReleaseTag("../../releases")
+ mostRecentTag, err := operator.GetMostRecentReleaseTag("../../releases")
Expect(err).NotTo(HaveOccurred())
GinkgoWriter.Printf("installing the recent CNPG tag %s\n", mostRecentTag)
- testsUtils.InstallLatestCNPGOperator(mostRecentTag, env)
+ operator.InstallLatest(env.Client, mostRecentTag)
DeferCleanup(cleanupOperatorAndMinio)
upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix)
@@ -772,14 +795,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
It("keeps clusters working after an online upgrade", func() {
upgradeNamespacePrefix := onlineUpgradeNamespace
By("applying environment changes for current upgrade to be performed", func() {
- testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, true, env)
+ operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, true)
})
- mostRecentTag, err := testsUtils.GetMostRecentReleaseTag("../../releases")
+ mostRecentTag, err := operator.GetMostRecentReleaseTag("../../releases")
Expect(err).NotTo(HaveOccurred())
GinkgoWriter.Printf("installing the recent CNPG tag %s\n", mostRecentTag)
- testsUtils.InstallLatestCNPGOperator(mostRecentTag, env)
+ operator.InstallLatest(env.Client, mostRecentTag)
DeferCleanup(cleanupOperatorAndMinio)
upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix)
@@ -796,7 +819,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
It("keeps clusters working after an online upgrade", func() {
upgradeNamespacePrefix := onlineUpgradeNamespace
By("applying environment changes for current upgrade to be performed", func() {
- testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, true, env)
+ operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, true)
})
GinkgoWriter.Printf("installing the current operator %s\n", currentOperatorManifest)
@@ -810,7 +833,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
It("keeps clusters working after a rolling upgrade", func() {
upgradeNamespacePrefix := rollingUpgradeNamespace
By("applying environment changes for current upgrade to be performed", func() {
- testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, false, env)
+ operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, false)
})
GinkgoWriter.Printf("installing the current operator %s\n", currentOperatorManifest)
deployOperator(currentOperatorManifest)
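
The mechanical shape of this refactor, repeated across every test file in the
patch, is worth spelling out once: helpers stop being methods on the shared
*TestingEnvironment and become package-level functions that take the context
and client explicitly. A minimal, self-contained sketch of the two call shapes
(the toy Env type and helpers below are illustrative, not the repository's
actual code):

package main

import (
	"context"
	"fmt"
)

// Env stands in for the old TestingEnvironment: helpers were methods on it,
// so every call site was coupled to the whole struct.
type Env struct{ Ctx context.Context }

// Old style: a method that reaches into the shared environment.
func (e *Env) GetPrimary(namespace, cluster string) string {
	return fmt.Sprintf("%s/%s-1", namespace, cluster)
}

// New style: a package-level helper with explicit dependencies, mirroring
// clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) above.
func GetPrimary(ctx context.Context, namespace, cluster string) string {
	_ = ctx // the real helper threads ctx into the Kubernetes API call
	return fmt.Sprintf("%s/%s-1", namespace, cluster)
}

func main() {
	env := &Env{Ctx: context.Background()}
	fmt.Println(env.GetPrimary("ns", "pg"))      // before the refactor
	fmt.Println(GetPrimary(env.Ctx, "ns", "pg")) // after the refactor
}
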
diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go
index 74f32e8bca..fce9351788 100644
--- a/tests/e2e/volume_snapshot_test.go
+++ b/tests/e2e/volume_snapshot_test.go
@@ -31,8 +31,14 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -75,11 +81,11 @@ var _ = Describe("Verify Volume Snapshot",
Skip("Test depth is lower than the amount requested for this test")
}
var err error
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
// Initializing namespace variable to be used in test case
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
// Creating a cluster with three nodes
@@ -89,7 +95,7 @@ var _ = Describe("Verify Volume Snapshot",
It("can create a Volume Snapshot", func() {
var backupObject apiv1.Backup
By("creating a volumeSnapshot and waiting until it's completed", func() {
- err := testUtils.CreateOnDemandBackupViaKubectlPlugin(
+ err := backups.CreateOnDemandBackupViaKubectlPlugin(
namespace,
clusterName,
"",
@@ -101,7 +107,7 @@ var _ = Describe("Verify Volume Snapshot",
// trigger a checkpoint as the backup may run on standby
CheckPointAndSwitchWalOnPrimary(namespace, clusterName)
Eventually(func(g Gomega) {
- backupList, err := env.GetBackupList(namespace)
+ backupList, err := backups.List(env.Ctx, env.Client, namespace)
g.Expect(err).ToNot(HaveOccurred())
for _, backup := range backupList.Items {
if !strings.Contains(backup.Name, clusterName) {
@@ -113,13 +119,14 @@ var _ = Describe("Verify Volume Snapshot",
backup.Status.Error)
g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2))
}
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
})
By("checking that volumeSnapshots are properly labeled", func() {
Eventually(func(g Gomega) {
for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements {
- volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name)
+ volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace,
+ snapshot.Name)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName))
g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name))
@@ -157,10 +164,10 @@ var _ = Describe("Verify Volume Snapshot",
}
var err error
- clusterToSnapshotName, err = env.GetResourceNameFromYAML(clusterToSnapshot)
+ clusterToSnapshotName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToSnapshot)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("create the certificates for MinIO", func() {
@@ -168,12 +175,14 @@ var _ = Describe("Verify Volume Snapshot",
Expect(err).ToNot(HaveOccurred())
})
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env)
+ )
Expect(err).ToNot(HaveOccurred())
})
@@ -194,11 +203,13 @@ var _ = Describe("Verify Volume Snapshot",
})
By("verify test connectivity to minio using barman-cloud-wal-archive script", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ clusterToSnapshotName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive(
- namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
+ namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123",
+ minioEnv.ServiceName)
if err != nil {
return false, err
}
@@ -210,13 +221,15 @@ var _ = Describe("Verify Volume Snapshot",
By("creating a snapshot and waiting until it's completed", func() {
var err error
backupName := fmt.Sprintf("%s-example", clusterToSnapshotName)
- backup, err = testUtils.CreateOnDemandBackup(
+ backup, err = backups.CreateOnDemand(
+ env.Ctx,
+ env.Client,
namespace,
clusterToSnapshotName,
backupName,
apiv1.BackupTargetStandby,
apiv1.BackupMethodVolumeSnapshot,
- env)
+ )
Expect(err).ToNot(HaveOccurred())
// trigger a checkpoint
CheckPointAndSwitchWalOnPrimary(namespace, clusterToSnapshotName)
@@ -231,7 +244,7 @@ var _ = Describe("Verify Volume Snapshot",
"Backup should be completed correctly, error message is '%s'",
backup.Status.Error)
g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2))
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
})
By("fetching the volume snapshots", func() {
@@ -239,11 +252,11 @@ var _ = Describe("Verify Volume Snapshot",
Expect(err).ToNot(HaveOccurred())
Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements)))
- envVars := testUtils.EnvVarsForSnapshots{
+ envVars := storage.EnvVarsForSnapshots{
DataSnapshot: snapshotDataEnv,
WalSnapshot: snapshotWalEnv,
}
- err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars)
+ err = storage.SetSnapshotNameAsEnv(&snapshotList, backup, envVars)
Expect(err).ToNot(HaveOccurred())
})
@@ -252,7 +265,7 @@ var _ = Describe("Verify Volume Snapshot",
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToSnapshotName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -262,16 +275,22 @@ var _ = Describe("Verify Volume Snapshot",
// including the newly created data within the recovery_target_time
time.Sleep(1 * time.Second)
// Get the recovery_target_time and pass it to the template engine
- recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterToSnapshotName, env)
+ recoveryTargetTime, err := postgres.GetCurrentTimestamp(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ namespace, clusterToSnapshotName,
+ )
Expect(err).ToNot(HaveOccurred())
err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime)
Expect(err).ToNot(HaveOccurred())
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterToSnapshotName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -287,19 +306,20 @@ var _ = Describe("Verify Volume Snapshot",
AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName)
})
- clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterSnapshotRestoreFile)
+ clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterSnapshotRestoreFile)
Expect(err).ToNot(HaveOccurred())
By("creating the cluster to be restored through snapshot and PITR", func() {
AssertCreateCluster(namespace, clusterToRestoreName, clusterSnapshotRestoreFile, env)
- AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow],
+ env)
})
By("verifying the correct data exists in the restored cluster", func() {
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToRestoreName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
@@ -369,18 +389,18 @@ var _ = Describe("Verify Volume Snapshot",
}
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() {
_ = os.Unsetenv(snapshotDataEnv)
_ = os.Unsetenv(snapshotWalEnv)
})
- clusterToBackupName, err = env.GetResourceNameFromYAML(clusterToBackupFilePath)
+ clusterToBackupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToBackupFilePath)
Expect(err).ToNot(HaveOccurred())
By("creating the cluster on which to execute the backup", func() {
AssertCreateCluster(namespace, clusterToBackupName, clusterToBackupFilePath, env)
- AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadySlow], env)
})
})
@@ -389,13 +409,13 @@ var _ = Describe("Verify Volume Snapshot",
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToBackupName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
})
- backupName, err := env.GetResourceNameFromYAML(backupFileFilePath)
+ backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupFileFilePath)
Expect(err).ToNot(HaveOccurred())
By("executing the backup", func() {
@@ -406,13 +426,14 @@ var _ = Describe("Verify Volume Snapshot",
var backup apiv1.Backup
By("waiting the backup to complete", func() {
Eventually(func(g Gomega) {
- err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup)
+ err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace},
+ &backup)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted),
"Backup should be completed correctly, error message is '%s'",
backup.Status.Error)
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName)
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName)
})
By("checking that the backup status is correctly populated", func() {
@@ -427,26 +448,26 @@ var _ = Describe("Verify Volume Snapshot",
var clusterToBackup *apiv1.Cluster
By("fetching the created cluster", func() {
- clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName)
+ clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName)
Expect(err).ToNot(HaveOccurred())
})
snapshotList := getAndVerifySnapshots(clusterToBackup, backup)
- envVars := testUtils.EnvVarsForSnapshots{
+ envVars := storage.EnvVarsForSnapshots{
DataSnapshot: snapshotDataEnv,
WalSnapshot: snapshotWalEnv,
}
- err = testUtils.SetSnapshotNameAsEnv(&snapshotList, &backup, envVars)
+ err = storage.SetSnapshotNameAsEnv(&snapshotList, &backup, envVars)
Expect(err).ToNot(HaveOccurred())
- clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterToRestoreFilePath)
+ clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterToRestoreFilePath)
Expect(err).ToNot(HaveOccurred())
By("executing the restore", func() {
CreateResourceFromFile(namespace, clusterToRestoreFilePath)
AssertClusterIsReady(namespace,
clusterToRestoreName,
- testTimeouts[testUtils.ClusterIsReady],
+ testTimeouts[timeouts.ClusterIsReady],
env,
)
})
@@ -455,14 +476,14 @@ var _ = Describe("Verify Volume Snapshot",
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToRestoreName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 2)
})
})
It("can take a snapshot targeting the primary", func() {
- backupName, err := env.GetResourceNameFromYAML(backupPrimaryFilePath)
+ backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupPrimaryFilePath)
Expect(err).ToNot(HaveOccurred())
By("executing the backup", func() {
@@ -473,14 +494,15 @@ var _ = Describe("Verify Volume Snapshot",
var backup apiv1.Backup
By("waiting the backup to complete", func() {
Eventually(func(g Gomega) {
- err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup)
+ err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace},
+ &backup)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(backup.Status.Phase).To(
BeEquivalentTo(apiv1.BackupPhaseCompleted),
"Backup should be completed correctly, error message is '%s'",
backup.Status.Error)
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName)
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName)
})
By("checking that the backup status is correctly populated", func() {
@@ -495,20 +517,21 @@ var _ = Describe("Verify Volume Snapshot",
var clusterToBackup *apiv1.Cluster
By("fetching the created cluster", func() {
- clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName)
+ clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName)
Expect(err).ToNot(HaveOccurred())
})
_ = getAndVerifySnapshots(clusterToBackup, backup)
By("ensuring cluster resumes after snapshot", func() {
- AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadyQuick],
+ env)
})
})
It("can take a snapshot in a single instance cluster", func() {
By("scaling down the cluster to a single instance", func() {
- cluster, err := env.GetCluster(namespace, clusterToBackupName)
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName)
Expect(err).ToNot(HaveOccurred())
updated := cluster.DeepCopy()
@@ -519,21 +542,23 @@ var _ = Describe("Verify Volume Snapshot",
By("ensuring there is only one pod", func() {
Eventually(func(g Gomega) {
- pods, err := env.GetClusterPodList(namespace, clusterToBackupName)
+ pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterToBackupName)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).To(HaveLen(1))
- }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed())
+ }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed())
})
backupName := "single-instance-snap"
By("taking a backup snapshot", func() {
- _, err := testUtils.CreateOnDemandBackup(
+ _, err := backups.CreateOnDemand(
+ env.Ctx,
+ env.Client,
namespace,
clusterToBackupName,
backupName,
apiv1.BackupTargetStandby,
apiv1.BackupMethodVolumeSnapshot,
- env)
+ )
Expect(err).NotTo(HaveOccurred())
})
@@ -541,13 +566,14 @@ var _ = Describe("Verify Volume Snapshot",
var backup apiv1.Backup
By("waiting the backup to complete", func() {
Eventually(func(g Gomega) {
- err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup)
+ err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace},
+ &backup)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted),
"Backup should be completed correctly, error message is '%s'",
backup.Status.Error)
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
- testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName)
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
+ backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName)
})
By("checking that the backup status is correctly populated", func() {
@@ -562,14 +588,15 @@ var _ = Describe("Verify Volume Snapshot",
var clusterToBackup *apiv1.Cluster
By("fetching the created cluster", func() {
var err error
- clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName)
+ clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName)
Expect(err).ToNot(HaveOccurred())
})
_ = getAndVerifySnapshots(clusterToBackup, backup)
By("ensuring cluster resumes after snapshot", func() {
- AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadyQuick], env)
+ AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadyQuick],
+ env)
})
})
})
@@ -591,17 +618,17 @@ var _ = Describe("Verify Volume Snapshot",
)
var clusterToSnapshotName string
- var backup *apiv1.Backup
+ var backupTaken *apiv1.Backup
BeforeAll(func() {
if testLevelEnv.Depth < int(level) {
Skip("Test depth is lower than the amount requested for this test")
}
var err error
- clusterToSnapshotName, err = env.GetResourceNameFromYAML(clusterToSnapshot)
+ clusterToSnapshotName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToSnapshot)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("create the certificates for MinIO", func() {
@@ -610,12 +637,13 @@ var _ = Describe("Verify Volume Snapshot",
})
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -625,11 +653,13 @@ var _ = Describe("Verify Volume Snapshot",
})
By("verify test connectivity to minio using barman-cloud-wal-archive script", func() {
- primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName)
+ primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace,
+ clusterToSnapshotName)
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive(
- namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
+ namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123",
+ minioEnv.ServiceName)
if err != nil {
return false, err
}
@@ -648,11 +678,14 @@ var _ = Describe("Verify Volume Snapshot",
})
By("inserting test data and creating WALs on the cluster to be snapshotted", func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterToSnapshotName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -664,7 +697,7 @@ var _ = Describe("Verify Volume Snapshot",
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToSnapshotName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertCreateTestData(env, tableLocator)
@@ -680,7 +713,8 @@ var _ = Describe("Verify Volume Snapshot",
By("creating a snapshot and waiting until it's completed", func() {
var err error
backupName := fmt.Sprintf("%s-online", clusterToSnapshotName)
- backup, err = testUtils.CreateBackup(
+ backupTaken, err = backups.Create(
+ env.Ctx, env.Client,
apiv1.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -692,7 +726,6 @@ var _ = Describe("Verify Volume Snapshot",
Cluster: apiv1.LocalObjectReference{Name: clusterToSnapshotName},
},
},
- env,
)
Expect(err).ToNot(HaveOccurred())
@@ -700,42 +733,43 @@ var _ = Describe("Verify Volume Snapshot",
err = env.Client.Get(env.Ctx, types.NamespacedName{
Namespace: namespace,
Name: backupName,
- }, backup)
+ }, backupTaken)
g.Expect(err).ToNot(HaveOccurred())
- g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted),
+ g.Expect(backupTaken.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted),
"Backup should be completed correctly, error message is '%s'",
- backup.Status.Error)
- g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2))
- g.Expect(backup.Status.BackupLabelFile).ToNot(BeEmpty())
- }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed())
+ backupTaken.Status.Error)
+ g.Expect(backupTaken.Status.BackupSnapshotStatus.Elements).To(HaveLen(2))
+ g.Expect(backupTaken.Status.BackupLabelFile).ToNot(BeEmpty())
+ }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed())
})
By("fetching the volume snapshots", func() {
- snapshotList, err := getSnapshots(backup.Name, clusterToSnapshotName, namespace)
+ snapshotList, err := getSnapshots(backupTaken.Name, clusterToSnapshotName, namespace)
Expect(err).ToNot(HaveOccurred())
- Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements)))
+ Expect(snapshotList.Items).To(HaveLen(len(backupTaken.Status.BackupSnapshotStatus.Elements)))
- envVars := testUtils.EnvVarsForSnapshots{
+ envVars := storage.EnvVarsForSnapshots{
DataSnapshot: snapshotDataEnv,
WalSnapshot: snapshotWalEnv,
}
- err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars)
+ err = storage.SetSnapshotNameAsEnv(&snapshotList, backupTaken, envVars)
Expect(err).ToNot(HaveOccurred())
})
- clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterSnapshotRestoreFile)
+ clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterSnapshotRestoreFile)
Expect(err).ToNot(HaveOccurred())
By("creating the cluster to be restored through snapshot and PITR", func() {
AssertCreateCluster(namespace, clusterToRestoreName, clusterSnapshotRestoreFile, env)
- AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], env)
+ AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow],
+ env)
})
By("verifying the correct data exists in the restored cluster", func() {
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToRestoreName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 4)
@@ -746,11 +780,14 @@ var _ = Describe("Verify Volume Snapshot",
// insert some data after the snapshot is taken; we want to verify that the data
// exists in the new pod once the cluster is scaled up
By("inserting more test data and creating WALs on the snapshotted cluster", func() {
- forward, conn, err := testUtils.ForwardPSQLConnection(
- env,
+ forward, conn, err := postgres.ForwardPSQLConnection(
+ env.Ctx,
+ env.Client,
+ env.Interface,
+ env.RestClientConfig,
namespace,
clusterToSnapshotName,
- testUtils.AppDBName,
+ postgres.AppDBName,
apiv1.ApplicationUserSecretSuffix,
)
defer func() {
@@ -768,37 +805,38 @@ var _ = Describe("Verify Volume Snapshot",
// reuse the snapshot taken from the clusterToSnapshot cluster
By("fetching the volume snapshots", func() {
- snapshotList, err := getSnapshots(backup.Name, clusterToSnapshotName, namespace)
+ snapshotList, err := getSnapshots(backupTaken.Name, clusterToSnapshotName, namespace)
Expect(err).ToNot(HaveOccurred())
- Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements)))
+ Expect(snapshotList.Items).To(HaveLen(len(backupTaken.Status.BackupSnapshotStatus.Elements)))
- envVars := testUtils.EnvVarsForSnapshots{
+ envVars := storage.EnvVarsForSnapshots{
DataSnapshot: snapshotDataEnv,
WalSnapshot: snapshotWalEnv,
}
- err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars)
+ err = storage.SetSnapshotNameAsEnv(&snapshotList, backupTaken, envVars)
Expect(err).ToNot(HaveOccurred())
})
By("scale up the cluster", func() {
- err := env.ScaleClusterSize(namespace, clusterToSnapshotName, 3)
+ err := clusterutils.ScaleSize(env.Ctx, env.Client, namespace, clusterToSnapshotName, 3)
Expect(err).ToNot(HaveOccurred())
})
By("checking the the cluster is working", func() {
// Setting up a cluster with three pods is slow, usually 200-600s
- AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[timeouts.ClusterIsReady], env)
})
// we need to verify that streaming replication continues to work
By("verifying the correct data exists in the new pod of the scaled cluster", func() {
- podList, err := env.GetClusterReplicas(namespace, clusterToSnapshotName)
+ podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace,
+ clusterToSnapshotName)
Expect(err).ToNot(HaveOccurred())
Expect(podList.Items).To(HaveLen(2))
tableLocator := TableLocator{
Namespace: namespace,
ClusterName: clusterToSnapshotName,
- DatabaseName: testUtils.AppDBName,
+ DatabaseName: postgres.AppDBName,
TableName: tableName,
}
AssertDataExpectedCount(env, tableLocator, 6)
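
Every Eventually block above reduces to the same loop: re-fetch the Backup
until its phase reports completion or a deadline expires. A minimal sketch of
that polling discipline without Gomega (getBackupPhase is a hypothetical
stand-in for env.Client.Get plus reading .Status.Phase):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type backupPhase string

const backupPhaseCompleted backupPhase = "completed"

// getBackupPhase stands in for fetching the Backup resource and reading its
// status; here it succeeds immediately so the example terminates.
func getBackupPhase(_ context.Context, _ string) (backupPhase, error) {
	return backupPhaseCompleted, nil
}

func waitForBackup(ctx context.Context, name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		phase, err := getBackupPhase(ctx, name)
		if err == nil && phase == backupPhaseCompleted {
			return nil
		}
		time.Sleep(2 * time.Second) // polling interval
	}
	return errors.New("backup did not complete in time")
}

func main() {
	fmt.Println(waitForBackup(context.Background(), "single-instance-snap", time.Minute))
}
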
diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go
index 03906b7152..5314ec6b7f 100644
--- a/tests/e2e/wal_restore_parallel_test.go
+++ b/tests/e2e/wal_restore_parallel_test.go
@@ -23,7 +23,11 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -63,19 +67,20 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
)
const namespacePrefix = "pg-backup-minio-wal-max-parallel"
- clusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile)
+ clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile)
Expect(err).ToNot(HaveOccurred())
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
By("creating the credentials for minio", func() {
- _, err = testUtils.CreateObjectStorageSecret(
+ _, err = secrets.CreateObjectStorageSecret(
+ env.Ctx,
+ env.Client,
namespace,
"backup-storage-creds",
"minio",
"minio123",
- env,
)
Expect(err).ToNot(HaveOccurred())
})
@@ -89,12 +94,12 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env)
// Get the primary
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
primary = pod.GetName()
// Get the standby
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, po := range podList.Items {
if po.Name != primary {
@@ -108,7 +113,7 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// Make sure both Wal-archive and Minio work
// Create a WAL on the primary and check if it arrives at minio, within a short time
By("archiving WALs and verifying they exist", func() {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
primary := pod.GetName()
latestWAL = switchWalAndGetLatestArchive(namespace, primary)
@@ -146,8 +151,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
By("asserting the spool directory is empty on the standby", func() {
if !testUtils.TestDirectoryEmpty(namespace, standby, SpoolDirectory) {
purgeSpoolDirectoryCmd := "rm " + SpoolDirectory + "/*"
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -161,8 +167,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// exit code 0, #1 is in the output location, #2 and #3 are in the spool directory.
// The flag is unset.
By("invoking the wal-restore command requesting #1 wal", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -194,8 +201,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// exit code 0, #2 is in the output location, #3 is in the spool directory.
// The flag is unset.
By("invoking the wal-restore command requesting #2 wal", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -223,8 +231,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// exit code 0, #3 is in the output location, spool directory is empty.
// The flag is unset.
By("invoking the wal-restore command requesting #3 wal", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -245,8 +254,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// exit code 0, #4 is in the output location, #5 is in the spool directory.
// The flag is set because file #6 is not present.
By("invoking the wal-restore command requesting #4 wal", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -281,8 +291,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// Expected outcome:
// exit code 0, #5 is in the output location, no files in the spool directory. The flag is still present.
By("invoking the wal-restore command requesting #5 wal", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -309,8 +320,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// Expected outcome:
// exit code 1, output location untouched, no files in the spool directory. The flag is unset.
By("invoking the wal-restore command requesting #6 wal", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
@@ -332,8 +344,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// exit code 0, #6 is in the output location, no files in the spool directory.
// The flag is present again because #7 and #8 are unavailable.
By("invoking the wal-restore command requesting #6 wal again", func() {
- _, _, err := env.ExecCommandInInstancePod(
- testUtils.PodLocator{
+ _, _, err := exec.CommandInInstancePod(
+ env.Ctx, env.Client, env.Interface, env.RestClientConfig,
+ exec.PodLocator{
Namespace: namespace,
PodName: standby,
}, nil,
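
The numbered steps above exercise the spool cache that parallel wal-restore
maintains: a miss for WAL #n fetches a batch of segments in parallel, serves
#n, and spools the rest; later requests drain the spool, and an end-of-WAL
flag is raised when the look-ahead files are missing. A minimal sketch of
that cache discipline (the batch size and segment naming are illustrative
only):

package main

import "fmt"

type spool map[string]bool

// restoreWAL serves a WAL segment either from the spool (hit: serve and
// evict) or by fetching a parallel batch (miss: serve the first segment and
// spool the rest). It returns false when nothing could be fetched.
func restoreWAL(s spool, name string, fetch func(string) []string) (string, bool) {
	if s[name] {
		delete(s, name)
		return name, true
	}
	batch := fetch(name)
	if len(batch) == 0 {
		return "", false
	}
	for _, w := range batch[1:] {
		s[w] = true
	}
	return batch[0], true
}

func main() {
	s := spool{}
	fetch := func(n string) []string { return []string{n, n + ".next1", n + ".next2"} }
	fmt.Println(restoreWAL(s, "wal-1", fetch))       // miss: fetches three, spools two
	fmt.Println(restoreWAL(s, "wal-1.next1", fetch)) // hit: served from the spool
}
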
diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go
index 6294f27a53..2171638e14 100644
--- a/tests/e2e/webhook_test.go
+++ b/tests/e2e/webhook_test.go
@@ -22,7 +22,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -57,7 +58,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
})
BeforeAll(func() {
- clusterName, err = env.GetResourceNameFromYAML(sampleFile)
+ clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile)
Expect(err).ToNot(HaveOccurred())
})
@@ -66,16 +67,16 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
clusterIsDefaulted = true
By("having a deployment for the operator in state ready", func() {
// Make sure that we have at least one operator already working
- err := env.ScaleOperatorDeployment(1)
+ err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1)
Expect(err).ToNot(HaveOccurred())
- ready, err := env.IsOperatorDeploymentReady()
+ ready, err := operator.IsDeploymentReady(env.Ctx, env.Client)
Expect(err).ShouldNot(HaveOccurred())
Expect(ready).To(BeTrue())
})
// Create a basic PG cluster
- webhookNamespace, err := env.CreateUniqueTestNamespace(webhookNamespacePrefix)
+ webhookNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, webhookNamespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env)
// Check if cluster is ready and the default values are populated
@@ -86,7 +87,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
webhookNamespacePrefix := "no-webhook-test"
clusterIsDefaulted = true
- mWebhook, admissionNumber, err := utils.GetCNPGsMutatingWebhookByName(env, mutatingWebhook)
+ mWebhook, admissionNumber, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutatingWebhook)
Expect(err).ToNot(HaveOccurred())
// Add a namespace selector to MutatingWebhooks and ValidatingWebhook, this will assign the webhooks
@@ -96,11 +97,13 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
newWebhook.Webhooks[admissionNumber].NamespaceSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "value"},
}
- err := utils.UpdateCNPGsMutatingWebhookConf(env, newWebhook)
+ err := operator.UpdateMutatingWebhookConf(env.Ctx, env.Interface, newWebhook)
Expect(err).ToNot(HaveOccurred())
})
- vWebhook, admissionNumber, err := utils.GetCNPGsValidatingWebhookByName(env, validatingWebhook)
+ vWebhook, admissionNumber, err := operator.GetValidatingWebhookByName(
+ env.Ctx, env.Client, validatingWebhook,
+ )
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("Disabling the validating webhook %v namespace", operatorNamespace), func() {
@@ -108,12 +111,12 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
newWebhook.Webhooks[admissionNumber].NamespaceSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "value"},
}
- err := utils.UpdateCNPGsValidatingWebhookConf(env, newWebhook)
+ err := operator.UpdateValidatingWebhookConf(env.Ctx, env.Interface, newWebhook)
Expect(err).ToNot(HaveOccurred())
})
// Create a basic PG cluster
- webhookNamespace, err = env.CreateUniqueTestNamespace(webhookNamespacePrefix)
+ webhookNamespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, webhookNamespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env)
// Check if cluster is ready and has no default value in the object
@@ -121,7 +124,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
// Make sure the operator is intact and not crashing
By("having a deployment for the operator in state ready", func() {
- ready, err := env.IsOperatorDeploymentReady()
+ ready, err := operator.IsDeploymentReady(env.Ctx, env.Client)
Expect(err).ShouldNot(HaveOccurred())
Expect(ready).To(BeTrue())
})
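
The trick the webhook test relies on is that an admission webhook with a
namespaceSelector is simply bypassed for namespaces that do not match it. A
minimal sketch of the mutation applied above, using the upstream
admissionregistration types (the webhook name below is a placeholder):

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// disableForUnlabeledNamespaces restricts the webhook at index idx to
// namespaces labeled test=value; since the test namespaces never carry that
// label, the webhook no longer intercepts their requests.
func disableForUnlabeledNamespaces(w *admissionregistrationv1.MutatingWebhookConfiguration, idx int) {
	w.Webhooks[idx].NamespaceSelector = &metav1.LabelSelector{
		MatchLabels: map[string]string{"test": "value"},
	}
}

func main() {
	cfg := &admissionregistrationv1.MutatingWebhookConfiguration{
		Webhooks: []admissionregistrationv1.MutatingWebhook{{Name: "example.cnpg.io"}},
	}
	disableForUnlabeledNamespaces(cfg, 0)
	fmt.Println(cfg.Webhooks[0].NamespaceSelector.MatchLabels)
}
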
diff --git a/tests/levels.go b/tests/levels.go
index 2f6475755e..9209f49fd2 100644
--- a/tests/levels.go
+++ b/tests/levels.go
@@ -20,7 +20,7 @@ import (
"os"
"strconv"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
)
// Level - Define test importance. Each test should define its own importance
@@ -46,13 +46,13 @@ const defaultTestDepth = int(Medium)
// TestEnvLevel struct for operator testing
type TestEnvLevel struct {
- *utils.TestingEnvironment
+ *environment.TestingEnvironment
Depth int
}
// TestLevel creates the environment for testing
func TestLevel() (*TestEnvLevel, error) {
- env, err := utils.NewTestingEnvironment()
+ env, err := environment.NewTestingEnvironment()
if err != nil {
return nil, err
}
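
The TestEnvLevel plumbing above is what makes the "Test depth is lower than
the amount requested for this test" Skip calls in the e2e specs work: each
spec declares an importance level, and specs above the configured depth are
skipped. A minimal sketch of that gate (the environment variable name and the
numeric levels are assumptions for illustration):

package main

import (
	"fmt"
	"os"
	"strconv"
)

const defaultTestDepth = 2 // mirrors int(Medium) above

// testDepth reads the configured depth from the environment, falling back
// to the default when unset or malformed.
func testDepth() int {
	if raw, ok := os.LookupEnv("TEST_DEPTH"); ok {
		if d, err := strconv.Atoi(raw); err == nil {
			return d
		}
	}
	return defaultTestDepth
}

func main() {
	const specLevel = 3 // a hypothetical low-importance spec
	if testDepth() < specLevel {
		fmt.Println("Skip: test depth is lower than the amount requested for this test")
		return
	}
	fmt.Println("running spec")
}
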
diff --git a/tests/utils/azurite.go b/tests/utils/backups/azurite.go
similarity index 88%
rename from tests/utils/azurite.go
rename to tests/utils/backups/azurite.go
index 7ea3ed7903..47bf7e22ce 100644
--- a/tests/utils/azurite.go
+++ b/tests/utils/backups/azurite.go
@@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package backups
import (
+ "context"
"encoding/json"
"fmt"
"os"
@@ -29,9 +30,15 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
)
const (
@@ -46,7 +53,8 @@ type AzureConfiguration struct {
BlobContainer string
}
-func newAzureConfigurationFromEnv() AzureConfiguration {
+// NewAzureConfigurationFromEnv creates a new AzureConfiguration from the environment variables
+func NewAzureConfigurationFromEnv() AzureConfiguration {
return AzureConfiguration{
StorageAccount: os.Getenv("AZURE_STORAGE_ACCOUNT"),
StorageKey: os.Getenv("AZURE_STORAGE_KEY"),
@@ -56,14 +64,19 @@ func newAzureConfigurationFromEnv() AzureConfiguration {
// CreateCertificateSecretsOnAzurite will create secrets for Azurite deployment
func CreateCertificateSecretsOnAzurite(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
clusterName,
azuriteCaSecName,
azuriteTLSSecName string,
- env *TestingEnvironment,
) error {
// create CA certificates
- _, caPair, err := CreateSecretCA(namespace, clusterName, azuriteCaSecName, true, env)
+ _, caPair, err := secrets.CreateSecretCA(
+ ctx, crudClient,
+ namespace, clusterName, azuriteCaSecName,
+ true,
+ )
if err != nil {
return err
}
@@ -75,7 +88,7 @@ func CreateCertificateSecretsOnAzurite(
return err
}
serverSecret := serverPair.GenerateCertificateSecret(namespace, azuriteTLSSecName)
- err = env.Client.Create(env.Ctx, serverSecret)
+ err = crudClient.Create(ctx, serverSecret)
if err != nil {
return err
}
@@ -83,15 +96,23 @@ func CreateCertificateSecretsOnAzurite(
}
// CreateStorageCredentialsOnAzurite will create credentials for Azurite
-func CreateStorageCredentialsOnAzurite(namespace string, env *TestingEnvironment) error {
+func CreateStorageCredentialsOnAzurite(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) error {
azuriteSecrets := getStorageCredentials(namespace)
- return env.Client.Create(env.Ctx, &azuriteSecrets)
+ return crudClient.Create(ctx, &azuriteSecrets)
}
// InstallAzurite will set up Azurite in the given namespace and create its service
-func InstallAzurite(namespace string, env *TestingEnvironment) error {
+func InstallAzurite(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) error {
azuriteDeployment := getAzuriteDeployment(namespace)
- err := env.Client.Create(env.Ctx, &azuriteDeployment)
+ err := crudClient.Create(ctx, &azuriteDeployment)
if err != nil {
return err
}
@@ -101,23 +122,27 @@ func InstallAzurite(namespace string, env *TestingEnvironment) error {
Name: "azurite",
}
deployment := &apiv1.Deployment{}
- err = env.Client.Get(env.Ctx, deploymentNamespacedName, deployment)
+ err = crudClient.Get(ctx, deploymentNamespacedName, deployment)
if err != nil {
return err
}
- err = DeploymentWaitForReady(env, deployment, 300)
+ err = deployments.WaitForReady(ctx, crudClient, deployment, 300)
if err != nil {
return err
}
azuriteService := getAzuriteService(namespace)
- err = env.Client.Create(env.Ctx, &azuriteService)
+ err = crudClient.Create(ctx, &azuriteService)
return err
}
// InstallAzCli will install the az CLI
-func InstallAzCli(namespace string, env *TestingEnvironment) error {
+func InstallAzCli(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) error {
azCLiPod := getAzuriteClientPod(namespace)
- err := PodCreateAndWaitForReady(env, &azCLiPod, 180)
+ err := pods.CreateAndWaitForReady(ctx, crudClient, &azCLiPod, 180)
if err != nil {
return err
}
@@ -355,6 +380,8 @@ func getStorageCredentials(namespace string) corev1.Secret {
// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster
// backup with PITR
func CreateClusterFromExternalClusterBackupWithPITROnAzure(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
externalClusterName,
sourceClusterName,
@@ -362,7 +389,6 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzure(
storageCredentialsSecretName,
azStorageAccount,
azBlobContainer string,
- env *TestingEnvironment,
) (*v1.Cluster, error) {
storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/",
@@ -428,7 +454,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzure(
},
},
}
- obj, err := CreateObject(env, restoreCluster)
+ obj, err := objects.Create(ctx, crudClient, restoreCluster)
if err != nil {
return nil, err
}
@@ -442,11 +468,12 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzure(
// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external
// cluster backup with PITR
func CreateClusterFromExternalClusterBackupWithPITROnAzurite(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
externalClusterName,
sourceClusterName,
targetTime string,
- env *TestingEnvironment,
) (*v1.Cluster, error) {
storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName)
@@ -511,7 +538,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzurite(
},
},
}
- obj, err := CreateObject(env, restoreCluster)
+ obj, err := objects.Create(ctx, crudClient, restoreCluster)
if err != nil {
return nil, err
}
@@ -550,7 +577,7 @@ func CountFilesOnAzureBlobStorage(
path string,
) (int, error) {
azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path)
- out, _, err := RunUnchecked(azBlobListCmd)
+ out, _, err := run.Unchecked(azBlobListCmd)
if err != nil {
return -1, err
}
@@ -566,7 +593,7 @@ func CountFilesOnAzuriteBlobStorage(
path string,
) (int, error) {
azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path)
- out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+
+ out, _, err := run.Unchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+
"-- /bin/bash -c '%v'", namespace, azBlobListCmd))
if err != nil {
return -1, err
@@ -579,7 +606,7 @@ func CountFilesOnAzuriteBlobStorage(
// verifySASTokenWriteActivity returns true if the given token has RW permissions,
// otherwise it returns false
func verifySASTokenWriteActivity(containerName string, id string, key string) bool {
- _, _, err := RunUnchecked(fmt.Sprintf("az storage container create "+
+ _, _, err := run.Unchecked(fmt.Sprintf("az storage container create "+
"--name %v --account-name %v "+
"--sas-token %v", containerName, id, key))
@@ -587,7 +614,11 @@ func verifySASTokenWriteActivity(containerName string, id string, key string) bo
}
// CreateSASTokenCredentials generates Secrets for the Azure Blob Storage
-func CreateSASTokenCredentials(namespace string, id string, key string, env *TestingEnvironment) error {
+func CreateSASTokenCredentials(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, id, key string,
+) error {
// Adding 24 hours to the current time
date := time.Now().UTC().Add(time.Hour * 24)
// Creating date time format for az command
@@ -598,7 +629,7 @@ func CreateSASTokenCredentials(namespace string, id string, key string, env *Tes
date.Hour(),
date.Minute())
- out, _, err := Run(fmt.Sprintf(
+ out, _, err := run.Run(fmt.Sprintf(
// SAS Token at Blob Container level does not currently work in Barman Cloud
// https://github.com/EnterpriseDB/barman/issues/388
// we will use SAS Token at Storage Account level
@@ -616,7 +647,7 @@ func CreateSASTokenCredentials(namespace string, id string, key string, env *Tes
}
SASTokenRW := strings.TrimRight(out, "\n")
- out, _, err = Run(fmt.Sprintf(
+ out, _, err = run.Run(fmt.Sprintf(
"az storage account generate-sas --account-name %v "+
"--https-only --permissions lr --account-key %v "+
"--resource-types co --services b --expiry %v -o tsv",
@@ -631,12 +662,19 @@ func CreateSASTokenCredentials(namespace string, id string, key string, env *Tes
return fmt.Errorf("expected token to be ready only")
}
- _, err = CreateObjectStorageSecret(namespace, "backup-storage-creds-sas", id, SASTokenRW, env)
+ _, err = secrets.CreateObjectStorageSecret(
+ ctx, crudClient,
+ namespace, "backup-storage-creds-sas",
+ id, SASTokenRW,
+ )
if err != nil {
return err
}
- _, err = CreateObjectStorageSecret(namespace, "restore-storage-creds-sas", id, SASTokenRO, env)
+ _, err = secrets.CreateObjectStorageSecret(ctx, crudClient,
+ namespace, "restore-storage-creds-sas",
+ id, SASTokenRO,
+ )
if err != nil {
return err
}
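
CreateSASTokenCredentials above derives two tokens from the same account key
and then relies on verifySASTokenWriteActivity to prove the read-only token
really cannot write, by attempting a container create with it. A minimal
sketch of that verification idea (the command runner is a stub standing in
for run.Unchecked):

package main

import "fmt"

// canWrite reports whether a SAS token permits writes by trying a harmless
// container-create with it and observing the outcome.
func canWrite(runCmd func(string) error, container, account, token string) bool {
	err := runCmd(fmt.Sprintf(
		"az storage container create --name %s --account-name %s --sas-token %s",
		container, account, token))
	return err == nil
}

func main() {
	// Pretend the storage service rejects the write, as it should for a
	// token generated with list/read ("lr") permissions only.
	deny := func(string) error { return fmt.Errorf("403: not authorized") }
	if canWrite(deny, "probe", "acct", "ro-token") {
		fmt.Println("token unexpectedly has write access")
	} else {
		fmt.Println("token is read-only, as required")
	}
}
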
diff --git a/tests/utils/backup.go b/tests/utils/backups/backup.go
similarity index 67%
rename from tests/utils/backup.go
rename to tests/utils/backups/backup.go
index 9ef2aadd9c..67c04f10ec 100644
--- a/tests/utils/backup.go
+++ b/tests/utils/backups/backup.go
@@ -14,64 +14,191 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package backups
import (
+ "context"
"fmt"
"os"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/gomega"
+ v2 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-
- . "github.com/onsi/ginkgo/v2" // nolint
- . "github.com/onsi/gomega" // nolint
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
)
-// ExecuteBackup performs a backup and checks the backup status
-func ExecuteBackup(
+// List gathers the current list of backups in the namespace
+func List(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*apiv1.BackupList, error) {
+ backupList := &apiv1.BackupList{}
+ err := crudClient.List(
+ ctx, backupList, client.InNamespace(namespace),
+ )
+ return backupList, err
+}
+
+// Create creates the given Backup resource and returns the created object
+func Create(
+ ctx context.Context,
+ crudClient client.Client,
+ targetBackup apiv1.Backup,
+) (*apiv1.Backup, error) {
+ obj, err := objects.Create(ctx, crudClient, &targetBackup)
+ if err != nil {
+ return nil, err
+ }
+ backup, ok := obj.(*apiv1.Backup)
+ if !ok {
+ return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj)
+ }
+ return backup, nil
+}
+
+// GetVolumeSnapshot gets a VolumeSnapshot given name and namespace
+func GetVolumeSnapshot(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, name string,
+) (*v1.VolumeSnapshot, error) {
+ namespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }
+ volumeSnapshot := &v1.VolumeSnapshot{}
+ err := objects.Get(ctx, crudClient, namespacedName, volumeSnapshot)
+ if err != nil {
+ return nil, err
+ }
+ return volumeSnapshot, nil
+}
+
+// AssertBackupConditionInClusterStatus checks that the backup condition in the Cluster's Status
+// eventually becomes true
+func AssertBackupConditionInClusterStatus(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) {
+ ginkgo.By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() {
+ gomega.Eventually(func() (string, error) {
+ getBackupCondition, err := GetConditionsInClusterStatus(
+ ctx, crudClient,
+ namespace, clusterName,
+ apiv1.ConditionBackup,
+ )
+ if err != nil {
+ return "", err
+ }
+ return string(getBackupCondition.Status), nil
+ }, 300, 5).Should(gomega.BeEquivalentTo("True"))
+ })
+}
+
+// CreateOnDemandBackupViaKubectlPlugin uses the kubectl plugin to create a backup
+func CreateOnDemandBackupViaKubectlPlugin(
+ namespace,
+ clusterName,
+ backupName string,
+ target apiv1.BackupTarget,
+ method apiv1.BackupMethod,
+) error {
+ command := fmt.Sprintf("kubectl cnpg backup %v -n %v", clusterName, namespace)
+
+ if backupName != "" {
+ command = fmt.Sprintf("%v --backup-name %v", command, backupName)
+ }
+ if target != "" {
+ command = fmt.Sprintf("%v --backup-target %v", command, target)
+ }
+ if method != "" {
+ command = fmt.Sprintf("%v --method %v", command, method)
+ }
+
+ _, _, err := run.Run(command)
+ return err
+}
+
+// GetConditionsInClusterStatus returns the condition of the given type from the cluster object's status
+func GetConditionsInClusterStatus(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace,
+ clusterName string,
+ conditionType apiv1.ClusterConditionType,
+) (*v2.Condition, error) {
+ var cluster *apiv1.Cluster
+ var err error
+
+ cluster, err = clusterutils.Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, cond := range cluster.Status.Conditions {
+ if cond.Type == string(conditionType) {
+ return &cond, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no condition matching requested type found: %v", conditionType)
+}
+
+// Execute performs a backup and checks the backup status
+func Execute(
+ ctx context.Context,
+ crudClient client.Client,
+ scheme *runtime.Scheme,
namespace,
backupFile string,
onlyTargetStandbys bool,
timeoutSeconds int,
- env *TestingEnvironment,
) *apiv1.Backup {
- backupName, err := env.GetResourceNameFromYAML(backupFile)
- Expect(err).ToNot(HaveOccurred())
- Eventually(func() error {
- _, stderr, err := RunUnchecked("kubectl apply -n " + namespace + " -f " + backupFile)
+ backupName, err := yaml.GetResourceNameFromYAML(scheme, backupFile)
+ gomega.Expect(err).ToNot(gomega.HaveOccurred())
+ gomega.Eventually(func() error {
+ _, stderr, err := run.Unchecked("kubectl apply -n " + namespace + " -f " + backupFile)
if err != nil {
return fmt.Errorf("could not create backup.\nStdErr: %v\nError: %v", stderr, err)
}
return nil
- }, RetryTimeout, PollingTime).Should(Succeed())
+ }, 60, objects.PollingTime).Should(gomega.Succeed())
backupNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: backupName,
}
backup := &apiv1.Backup{}
// Verifying backup status
- Eventually(func() (apiv1.BackupPhase, error) {
- err = env.Client.Get(env.Ctx, backupNamespacedName, backup)
+ gomega.Eventually(func() (apiv1.BackupPhase, error) {
+ err = crudClient.Get(ctx, backupNamespacedName, backup)
return backup.Status.Phase, err
- }, timeoutSeconds).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted))
- Eventually(func() (string, error) {
- err = env.Client.Get(env.Ctx, backupNamespacedName, backup)
+ }, timeoutSeconds).Should(gomega.BeEquivalentTo(apiv1.BackupPhaseCompleted))
+ gomega.Eventually(func() (string, error) {
+ err = crudClient.Get(ctx, backupNamespacedName, backup)
if err != nil {
return "", err
}
backupStatus := backup.GetStatus()
return backupStatus.BeginLSN, err
- }, timeoutSeconds).ShouldNot(BeEmpty())
+ }, timeoutSeconds).ShouldNot(gomega.BeEmpty())
var cluster *apiv1.Cluster
- Eventually(func() error {
+ gomega.Eventually(func() error {
var err error
- cluster, err = env.GetCluster(namespace, backup.Spec.Cluster.Name)
+ cluster, err = clusterutils.Get(ctx, crudClient, namespace, backup.Spec.Cluster.Name)
return err
- }, timeoutSeconds).ShouldNot(HaveOccurred())
+ }, timeoutSeconds).ShouldNot(gomega.HaveOccurred())
backupStatus := backup.GetStatus()
if cluster.Spec.Backup != nil {
@@ -81,36 +208,38 @@ func ExecuteBackup(
}
switch backupTarget {
case apiv1.BackupTargetPrimary, "":
- Expect(backupStatus.InstanceID.PodName).To(BeEquivalentTo(cluster.Status.TargetPrimary))
+ gomega.Expect(backupStatus.InstanceID.PodName).To(gomega.BeEquivalentTo(cluster.Status.TargetPrimary))
case apiv1.BackupTargetStandby:
- Expect(backupStatus.InstanceID.PodName).To(BeElementOf(cluster.Status.InstanceNames))
+ gomega.Expect(backupStatus.InstanceID.PodName).To(gomega.BeElementOf(cluster.Status.InstanceNames))
if onlyTargetStandbys {
- Expect(backupStatus.InstanceID.PodName).NotTo(Equal(cluster.Status.TargetPrimary))
+ gomega.Expect(backupStatus.InstanceID.PodName).NotTo(gomega.Equal(cluster.Status.TargetPrimary))
}
}
}
- Expect(backupStatus.BeginWal).NotTo(BeEmpty())
- Expect(backupStatus.EndLSN).NotTo(BeEmpty())
- Expect(backupStatus.EndWal).NotTo(BeEmpty())
+ gomega.Expect(backupStatus.BeginWal).NotTo(gomega.BeEmpty())
+ gomega.Expect(backupStatus.EndLSN).NotTo(gomega.BeEmpty())
+ gomega.Expect(backupStatus.EndWal).NotTo(gomega.BeEmpty())
return backup
}
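+
+// Usage sketch (illustrative only): how a test could invoke Execute with the
+// refactored signature; the fixture path and timeout below are assumptions.
+//
+//	backup := backups.Execute(
+//		env.Ctx, env.Client, env.Scheme,
+//		namespace, "fixtures/backup/backup.yaml",
+//		false, // onlyTargetStandbys
+//		300,   // timeoutSeconds
+//	)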
// CreateClusterFromBackupUsingPITR creates a cluster from backup, using the PITR
func CreateClusterFromBackupUsingPITR(
+ ctx context.Context,
+ crudClient client.Client,
+ scheme *runtime.Scheme,
namespace,
clusterName,
backupFilePath,
targetTime string,
- env *TestingEnvironment,
) (*apiv1.Cluster, error) {
- backupName, err := env.GetResourceNameFromYAML(backupFilePath)
+ backupName, err := yaml.GetResourceNameFromYAML(scheme, backupFilePath)
if err != nil {
return nil, err
}
storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
restoreCluster := &apiv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{
+ ObjectMeta: v2.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
@@ -148,7 +277,7 @@ func CreateClusterFromBackupUsingPITR(
},
},
}
- obj, err := CreateObject(env, restoreCluster)
+ obj, err := objects.Create(ctx, crudClient, restoreCluster)
if err != nil {
return nil, err
}
@@ -162,16 +291,17 @@ func CreateClusterFromBackupUsingPITR(
// CreateClusterFromExternalClusterBackupWithPITROnMinio creates a cluster on Minio, starting from an external cluster
// backup with PITR
func CreateClusterFromExternalClusterBackupWithPITROnMinio(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
externalClusterName,
sourceClusterName,
targetTime string,
- env *TestingEnvironment,
) (*apiv1.Cluster, error) {
storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
restoreCluster := &apiv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{
+ ObjectMeta: v2.ObjectMeta{
Name: externalClusterName,
Namespace: namespace,
},
@@ -237,7 +367,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio(
},
},
}
- obj, err := CreateObject(env, restoreCluster)
+ obj, err := objects.Create(ctx, crudClient, restoreCluster)
if err != nil {
return nil, err
}
@@ -248,67 +378,20 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio(
return cluster, nil
}
-// GetConditionsInClusterStatus get conditions values as given type from cluster object status
-func GetConditionsInClusterStatus(
- namespace,
- clusterName string,
- env *TestingEnvironment,
- conditionType apiv1.ClusterConditionType,
-) (*metav1.Condition, error) {
- var cluster *apiv1.Cluster
- var err error
-
- cluster, err = env.GetCluster(namespace, clusterName)
- if err != nil {
- return nil, err
- }
-
- for _, cond := range cluster.Status.Conditions {
- if cond.Type == string(conditionType) {
- return &cond, nil
- }
- }
-
- return nil, fmt.Errorf("no condition matching requested type found: %v", conditionType)
-}
-
-// CreateOnDemandBackupViaKubectlPlugin uses the kubectl plugin to create a backup
-func CreateOnDemandBackupViaKubectlPlugin(
- namespace,
- clusterName,
- backupName string,
- target apiv1.BackupTarget,
- method apiv1.BackupMethod,
-) error {
- command := fmt.Sprintf("kubectl cnpg backup %v -n %v", clusterName, namespace)
-
- if backupName != "" {
- command = fmt.Sprintf("%v --backup-name %v", command, backupName)
- }
- if target != "" {
- command = fmt.Sprintf("%v --backup-target %v", command, target)
- }
- if method != "" {
- command = fmt.Sprintf("%v --method %v", command, method)
- }
-
- _, _, err := Run(command)
- return err
-}
-
-// CreateOnDemandBackup creates a Backup resource for a given cluster name
-// Deprecated: Use CreateBackup.
+// CreateOnDemand creates a Backup resource for a given cluster name
+// Deprecated: Use Create.
// TODO: eradicate
-func CreateOnDemandBackup(
+func CreateOnDemand(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
clusterName,
backupName string,
target apiv1.BackupTarget,
method apiv1.BackupMethod,
- env *TestingEnvironment,
) (*apiv1.Backup, error) {
targetBackup := &apiv1.Backup{
- ObjectMeta: metav1.ObjectMeta{
+ ObjectMeta: v2.ObjectMeta{
Name: backupName,
Namespace: namespace,
},
@@ -326,23 +409,7 @@ func CreateOnDemandBackup(
targetBackup.Spec.Method = method
}
- obj, err := CreateObject(env, targetBackup)
- if err != nil {
- return nil, err
- }
- backup, ok := obj.(*apiv1.Backup)
- if !ok {
- return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj)
- }
- return backup, nil
-}
-
-// CreateBackup creates a Backup resource for a given cluster name
-func CreateBackup(
- targetBackup apiv1.Backup,
- env *TestingEnvironment,
-) (*apiv1.Backup, error) {
- obj, err := CreateObject(env, &targetBackup)
+ obj, err := objects.Create(ctx, crudClient, targetBackup)
if err != nil {
return nil, err
}
@@ -352,35 +419,3 @@ func CreateBackup(
}
return backup, nil
}
-
-// GetVolumeSnapshot gets a VolumeSnapshot given name and namespace
-func (env TestingEnvironment) GetVolumeSnapshot(
- namespace,
- name string,
-) (*volumesnapshot.VolumeSnapshot, error) {
- namespacedName := types.NamespacedName{
- Namespace: namespace,
- Name: name,
- }
- volumeSnapshot := &volumesnapshot.VolumeSnapshot{}
- err := GetObject(&env, namespacedName, volumeSnapshot)
- if err != nil {
- return nil, err
- }
- return volumeSnapshot, nil
-}
-
-// AssertBackupConditionInClusterStatus check that the backup condition in the Cluster's Status
-// eventually returns true
-func AssertBackupConditionInClusterStatus(env *TestingEnvironment, namespace, clusterName string) {
- By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() {
- Eventually(func() (string, error) {
- getBackupCondition, err := GetConditionsInClusterStatus(
- namespace, clusterName, env, apiv1.ConditionBackup)
- if err != nil {
- return "", err
- }
- return string(getBackupCondition.Status), nil
- }, 300, 5).Should(BeEquivalentTo("True"))
- })
-}
diff --git a/tests/utils/backups/doc.go b/tests/utils/backups/doc.go
new file mode 100644
index 0000000000..3064ea0449
--- /dev/null
+++ b/tests/utils/backups/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package backups provides backup utilities
+package backups
diff --git a/tests/utils/certificates.go b/tests/utils/certificates.go
deleted file mode 100644
index f6d08cdcad..0000000000
--- a/tests/utils/certificates.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "fmt"
-
- corev1 "k8s.io/api/core/v1"
- ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
-
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-)
-
-// CreateClientCertificatesViaKubectlPlugin creates a certificate for a given user on a given cluster
-func CreateClientCertificatesViaKubectlPlugin(
- cluster apiv1.Cluster,
- certName string,
- userName string,
- env *TestingEnvironment,
-) error {
- // clientCertName := "cluster-cert"
- // user := "app"
- // Create the certificate
- _, _, err := Run(fmt.Sprintf(
- "kubectl cnpg certificate %v --cnpg-cluster %v --cnpg-user %v -n %v",
- certName,
- cluster.Name,
- userName,
- cluster.Namespace))
- if err != nil {
- return err
- }
- // Verifying client certificate secret existence
- secret := &corev1.Secret{}
- err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: certName}, secret)
- return err
-}
diff --git a/tests/utils/cloud_vendor.go b/tests/utils/cloudvendors/cloud_vendor.go
similarity index 96%
rename from tests/utils/cloud_vendor.go
rename to tests/utils/cloudvendors/cloud_vendor.go
index 1f3062a15b..be50b780db 100644
--- a/tests/utils/cloud_vendor.go
+++ b/tests/utils/cloudvendors/cloud_vendor.go
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package cloudvendors provides the variables that define which cloud vendor the e2e tests run on
+package cloudvendors
import (
"fmt"
diff --git a/tests/utils/cluster.go b/tests/utils/cluster.go
deleted file mode 100644
index de6a301c83..0000000000
--- a/tests/utils/cluster.go
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "text/tabwriter"
-
- "github.com/cheynewallace/tabby"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
-)
-
-// AllClusterPodsHaveLabels verifies if the labels defined in a map are included
-// in all the pods of a cluster
-func AllClusterPodsHaveLabels(
- env *TestingEnvironment,
- namespace, clusterName string,
- labels map[string]string,
-) (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return false, err
- }
- podList, err := env.GetClusterPodList(namespace, clusterName)
- if err != nil {
- return false, err
- }
- if len(podList.Items) != cluster.Spec.Instances {
- return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances)
- }
- for _, pod := range podList.Items {
- if !PodHasLabels(pod, labels) {
- return false, fmt.Errorf("%v found labels, expected %v", pod.Labels, labels)
- }
- }
- return true, nil
-}
-
-// AllClusterPodsHaveAnnotations verifies if the annotations defined in a map are included
-// in all the pods of a cluster
-func AllClusterPodsHaveAnnotations(
- env *TestingEnvironment,
- namespace, clusterName string,
- annotations map[string]string,
-) (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return false, err
- }
- podList, err := env.GetClusterPodList(namespace, clusterName)
- if err != nil {
- return false, err
- }
- if len(podList.Items) != cluster.Spec.Instances {
- return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances)
- }
- for _, pod := range podList.Items {
- if !PodHasAnnotations(pod, annotations) {
- return false, fmt.Errorf("%v found annotations, %v expected", pod.Annotations, annotations)
- }
- }
- return true, nil
-}
-
-// ClusterHasLabels verifies that the labels of a cluster contain a specified
-// labels map
-func ClusterHasLabels(
- cluster *apiv1.Cluster,
- labels map[string]string,
-) bool {
- clusterLabels := cluster.Labels
- for k, v := range labels {
- val, ok := clusterLabels[k]
- if !ok || (v != val) {
- return false
- }
- }
- return true
-}
-
-// ClusterHasAnnotations verifies that the annotations of a cluster contain a specified
-// annotations map
-func ClusterHasAnnotations(
- cluster *apiv1.Cluster,
- annotations map[string]string,
-) bool {
- clusterAnnotations := cluster.Annotations
- for k, v := range annotations {
- val, ok := clusterAnnotations[k]
- if !ok || (v != val) {
- return false
- }
- }
- return true
-}
-
-// DumpNamespaceObjects logs the clusters, pods, pvcs etc. found in a namespace as JSON sections
-func (env TestingEnvironment) DumpNamespaceObjects(namespace string, filename string) {
- f, err := os.Create(filepath.Clean(filename))
- if err != nil {
- fmt.Println(err)
- return
- }
- defer func() {
- _ = f.Sync()
- _ = f.Close()
- }()
- w := bufio.NewWriter(f)
- clusterList := &apiv1.ClusterList{}
- _ = GetObjectList(&env, clusterList, client.InNamespace(namespace))
-
- for _, cluster := range clusterList.Items {
- out, _ := json.MarshalIndent(cluster, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v cluster\n", namespace, cluster.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
-
- podList, _ := env.GetPodList(namespace)
- for _, pod := range podList.Items {
- out, _ := json.MarshalIndent(pod, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
-
- pvcList, _ := env.GetPVCList(namespace)
- for _, pvc := range pvcList.Items {
- out, _ := json.MarshalIndent(pvc, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v PVC\n", namespace, pvc.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
-
- jobList, _ := env.GetJobList(namespace)
- for _, job := range jobList.Items {
- out, _ := json.MarshalIndent(job, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v job\n", namespace, job.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
-
- eventList, _ := env.GetEventList(namespace)
- out, _ := json.MarshalIndent(eventList.Items, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping events for namespace %v\n", namespace)
- _, _ = fmt.Fprintln(w, string(out))
-
- serviceAccountList, _ := env.GetServiceAccountList(namespace)
- for _, sa := range serviceAccountList.Items {
- out, _ := json.MarshalIndent(sa, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v serviceaccount\n", namespace, sa.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
-
- suffixes := []string{"-r", "-rw", "-any"}
- for _, cluster := range clusterList.Items {
- for _, suffix := range suffixes {
- namespacedName := types.NamespacedName{
- Namespace: namespace,
- Name: cluster.Name + suffix,
- }
- endpoint := &corev1.Endpoints{}
- _ = env.Client.Get(env.Ctx, namespacedName, endpoint)
- out, _ := json.MarshalIndent(endpoint, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v endpoint\n", namespace, endpoint.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
- }
- // dump backup info
- backupList, _ := env.GetBackupList(namespace)
- // dump backup object info if it's configure
- for _, backup := range backupList.Items {
- out, _ := json.MarshalIndent(backup, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v backup\n", namespace, backup.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
- // dump scheduledbackup info
- scheduledBackupList, _ := env.GetScheduledBackupList(namespace)
- // dump backup object info if it's configure
- for _, scheduledBackup := range scheduledBackupList.Items {
- out, _ := json.MarshalIndent(scheduledBackup, "", " ")
- _, _ = fmt.Fprintf(w, "Dumping %v/%v scheduledbackup\n", namespace, scheduledBackup.Name)
- _, _ = fmt.Fprintln(w, string(out))
- }
-
- err = w.Flush()
- if err != nil {
- fmt.Println(err)
- return
- }
-}
-
-// GetCluster gets a cluster given name and namespace
-func (env TestingEnvironment) GetCluster(namespace string, name string) (*apiv1.Cluster, error) {
- namespacedName := types.NamespacedName{
- Namespace: namespace,
- Name: name,
- }
- cluster := &apiv1.Cluster{}
- err := GetObject(&env, namespacedName, cluster)
- if err != nil {
- return nil, err
- }
- return cluster, nil
-}
-
-// GetClusterPodList gathers the current list of instance pods for a cluster in a namespace
-func (env TestingEnvironment) GetClusterPodList(namespace string, clusterName string) (*corev1.PodList, error) {
- podList := &corev1.PodList{}
- err := GetObjectList(&env, podList, client.InNamespace(namespace),
- client.MatchingLabels{
- utils.ClusterLabelName: clusterName,
- utils.PodRoleLabelName: "instance", // this ensures we are getting instance pods only
- },
- )
- return podList, err
-}
-
-// GetClusterPrimary gets the primary pod of a cluster
-func (env TestingEnvironment) GetClusterPrimary(namespace string, clusterName string) (*corev1.Pod, error) {
- podList := &corev1.PodList{}
-
- err := GetObjectList(&env, podList, client.InNamespace(namespace),
- client.MatchingLabels{
- utils.ClusterLabelName: clusterName,
- utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelPrimary,
- },
- )
- if err != nil {
- return &corev1.Pod{}, err
- }
- if len(podList.Items) > 0 {
- // if there are multiple, get the one without deletion timestamp
- for _, pod := range podList.Items {
- if pod.DeletionTimestamp == nil {
- return &pod, nil
- }
- }
- err = fmt.Errorf("all pod with primary role has deletion timestamp")
- return &(podList.Items[0]), err
- }
- err = fmt.Errorf("no primary found")
- return &corev1.Pod{}, err
-}
-
-// GetClusterReplicas gets a slice containing all the replica pods of a cluster
-func (env TestingEnvironment) GetClusterReplicas(namespace string, clusterName string) (*corev1.PodList, error) {
- podList := &corev1.PodList{}
- err := GetObjectList(&env, podList, client.InNamespace(namespace),
- client.MatchingLabels{
- utils.ClusterLabelName: clusterName,
- utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelReplica,
- },
- )
- if err != nil {
- return podList, err
- }
- if len(podList.Items) > 0 {
- return podList, nil
- }
- err = fmt.Errorf("no replicas found")
- return podList, err
-}
-
-// ScaleClusterSize scales a cluster to the requested size
-func (env TestingEnvironment) ScaleClusterSize(namespace, clusterName string, newClusterSize int) error {
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return err
- }
- originalCluster := cluster.DeepCopy()
- cluster.Spec.Instances = newClusterSize
- err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(originalCluster))
- if err != nil {
- return err
- }
- return nil
-}
-
-// PrintClusterResources prints a summary of the cluster pods, jobs, pvcs etc.
-func PrintClusterResources(namespace, clusterName string, env *TestingEnvironment) string {
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return fmt.Sprintf("Error while Getting Object %v", err)
- }
-
- buffer := &bytes.Buffer{}
- w := tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0)
- clusterInfo := tabby.NewCustom(w)
- clusterInfo.AddLine("Timeout while waiting for cluster ready, dumping more cluster information for analysis...")
- clusterInfo.AddLine()
- clusterInfo.AddLine()
- clusterInfo.AddLine("Cluster information:")
- clusterInfo.AddLine("Name", cluster.GetName())
- clusterInfo.AddLine("Namespace", cluster.GetNamespace())
- clusterInfo.AddLine()
- clusterInfo.AddHeader("Items", "Values")
- clusterInfo.AddLine("Spec.Instances", cluster.Spec.Instances)
- clusterInfo.AddLine("Wal storage", cluster.ShouldCreateWalArchiveVolume())
- clusterInfo.AddLine("Cluster phase", cluster.Status.Phase)
- clusterInfo.AddLine("Phase reason", cluster.Status.PhaseReason)
- clusterInfo.AddLine("Cluster target primary", cluster.Status.TargetPrimary)
- clusterInfo.AddLine("Cluster current primary", cluster.Status.CurrentPrimary)
- clusterInfo.AddLine()
-
- podList, _ := env.GetClusterPodList(cluster.GetNamespace(), cluster.GetName())
-
- clusterInfo.AddLine("Cluster Pods information:")
- clusterInfo.AddLine("Ready pod number: ", utils.CountReadyPods(podList.Items))
- clusterInfo.AddLine()
- clusterInfo.AddHeader("Items", "Values")
- for _, pod := range podList.Items {
- clusterInfo.AddLine("Pod name", pod.Name)
- clusterInfo.AddLine("Pod phase", pod.Status.Phase)
- if cluster.Status.InstancesReportedState != nil {
- if instanceReportState, ok := cluster.Status.InstancesReportedState[apiv1.PodName(pod.Name)]; ok {
- clusterInfo.AddLine("Is Primary", instanceReportState.IsPrimary)
- clusterInfo.AddLine("TimeLineID", instanceReportState.TimeLineID)
- clusterInfo.AddLine("---", "---")
- }
- } else {
- clusterInfo.AddLine("InstanceReportState not reported", "")
- }
- }
-
- clusterInfo.AddLine("Jobs information:")
- clusterInfo.AddLine()
- clusterInfo.AddHeader("Items", "Values")
- jobList, _ := env.GetJobList(cluster.GetNamespace())
- for _, job := range jobList.Items {
- clusterInfo.AddLine("Job name", job.Name)
- clusterInfo.AddLine("Job status", fmt.Sprintf("%#v", job.Status))
- }
-
- pvcList, _ := env.GetPVCList(cluster.GetNamespace())
- clusterInfo.AddLine()
- clusterInfo.AddLine("Cluster PVC information: (dumping all pvc under the namespace)")
- clusterInfo.AddLine("Available Cluster PVCCount", cluster.Status.PVCCount)
- clusterInfo.AddLine()
- clusterInfo.AddHeader("Items", "Values")
- for _, pvc := range pvcList.Items {
- clusterInfo.AddLine("PVC name", pvc.Name)
- clusterInfo.AddLine("PVC phase", pvc.Status.Phase)
- clusterInfo.AddLine("---", "---")
- }
-
- snapshotList, _ := env.GetSnapshotList(cluster.Namespace)
- clusterInfo.AddLine()
- clusterInfo.AddLine("Cluster Snapshot information: (dumping all snapshot under the namespace)")
- clusterInfo.AddLine()
- clusterInfo.AddHeader("Items", "Values")
- for _, snapshot := range snapshotList.Items {
- clusterInfo.AddLine("Snapshot name", snapshot.Name)
- if snapshot.Status.ReadyToUse != nil {
- clusterInfo.AddLine("Snapshot ready to use", *snapshot.Status.ReadyToUse)
- } else {
- clusterInfo.AddLine("Snapshot ready to use", "false")
- }
- clusterInfo.AddLine("---", "---")
- }
-
- // do not remove, this is needed to ensure that the writer cache is always flushed.
- clusterInfo.Print()
-
- return buffer.String()
-}
-
-// DescribeKubernetesNodes prints the `describe node` for each node in the
-// kubernetes cluster
-func (env TestingEnvironment) DescribeKubernetesNodes() (string, error) {
- nodeList, err := env.GetNodeList()
- if err != nil {
- return "", err
- }
- var report strings.Builder
- for _, node := range nodeList.Items {
- command := fmt.Sprintf("kubectl describe node %v", node.Name)
- stdout, _, err := Run(command)
- if err != nil {
- return "", err
- }
- report.WriteString("================================================\n")
- report.WriteString(stdout)
- report.WriteString("================================================\n")
- }
- return report.String(), nil
-}
diff --git a/tests/utils/clusterutils/cluster.go b/tests/utils/clusterutils/cluster.go
new file mode 100644
index 0000000000..b237a0a9c3
--- /dev/null
+++ b/tests/utils/clusterutils/cluster.go
@@ -0,0 +1,227 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package clusterutils provides functions to handle cluster actions
+package clusterutils
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+)
+
+// AllPodsHaveLabels verifies that the labels defined in a map are present on
+// all the pods of a cluster
+func AllPodsHaveLabels(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+ labels map[string]string,
+) (bool, error) {
+ cluster, err := Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return false, err
+ }
+ podList, err := ListPods(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return false, err
+ }
+ if len(podList.Items) != cluster.Spec.Instances {
+ return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances)
+ }
+ for _, pod := range podList.Items {
+ if !pods.HasLabels(pod, labels) {
+ return false, fmt.Errorf("%v found labels, expected %v", pod.Labels, labels)
+ }
+ }
+ return true, nil
+}
+
+// AllPodsHaveAnnotations verifies that the annotations defined in a map are
+// present on all the pods of a cluster
+func AllPodsHaveAnnotations(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+ annotations map[string]string,
+) (bool, error) {
+ cluster, err := Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return false, err
+ }
+ podList, err := ListPods(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return false, err
+ }
+ if len(podList.Items) != cluster.Spec.Instances {
+ return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances)
+ }
+ for _, pod := range podList.Items {
+ if !pods.HasAnnotations(pod, annotations) {
+ return false, fmt.Errorf("%v found annotations, %v expected", pod.Annotations, annotations)
+ }
+ }
+ return true, nil
+}
+
+// HasLabels verifies that the labels of a cluster contain a specified
+// labels map
+func HasLabels(
+ cluster *apiv1.Cluster,
+ labels map[string]string,
+) bool {
+ clusterLabels := cluster.Labels
+ for k, v := range labels {
+ val, ok := clusterLabels[k]
+ if !ok || (v != val) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAnnotations verifies that the annotations of a cluster contain a specified
+// annotations map
+func HasAnnotations(
+ cluster *apiv1.Cluster,
+ annotations map[string]string,
+) bool {
+ clusterAnnotations := cluster.Annotations
+ for k, v := range annotations {
+ val, ok := clusterAnnotations[k]
+ if !ok || (v != val) {
+ return false
+ }
+ }
+ return true
+}
+
+// Get gets a cluster given name and namespace
+func Get(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, name string,
+) (*apiv1.Cluster, error) {
+ namespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }
+ cluster := &apiv1.Cluster{}
+ err := objects.Get(ctx, crudClient, namespacedName, cluster)
+ if err != nil {
+ return nil, err
+ }
+ return cluster, nil
+}
+
+// ListPods gathers the current list of instance pods for a cluster in a namespace
+func ListPods(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) (*corev1.PodList, error) {
+ podList := &corev1.PodList{}
+ err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+ client.MatchingLabels{
+ utils.ClusterLabelName: clusterName,
+ utils.PodRoleLabelName: "instance", // this ensures we are getting instance pods only
+ },
+ )
+ return podList, err
+}
+
+// GetPrimary gets the primary pod of a cluster
+func GetPrimary(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) (*corev1.Pod, error) {
+ podList := &corev1.PodList{}
+
+ err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+ client.MatchingLabels{
+ utils.ClusterLabelName: clusterName,
+ utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelPrimary,
+ },
+ )
+ if err != nil {
+ return &corev1.Pod{}, err
+ }
+ if len(podList.Items) > 0 {
+ // if there are multiple, get the one without deletion timestamp
+ for _, pod := range podList.Items {
+ if pod.DeletionTimestamp == nil {
+ return &pod, nil
+ }
+ }
+		err = fmt.Errorf("all pods with the primary role have a deletion timestamp")
+ return &(podList.Items[0]), err
+ }
+ err = fmt.Errorf("no primary found")
+ return &corev1.Pod{}, err
+}
+
+// GetReplicas gets a slice containing all the replica pods of a cluster
+func GetReplicas(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) (*corev1.PodList, error) {
+ podList := &corev1.PodList{}
+ err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+ client.MatchingLabels{
+ utils.ClusterLabelName: clusterName,
+ utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelReplica,
+ },
+ )
+ if err != nil {
+ return podList, err
+ }
+ if len(podList.Items) > 0 {
+ return podList, nil
+ }
+ err = fmt.Errorf("no replicas found")
+ return podList, err
+}
+
+// ScaleSize scales a cluster to the requested size
+func ScaleSize(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+ newClusterSize int,
+) error {
+ cluster, err := Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return err
+ }
+ originalCluster := cluster.DeepCopy()
+ cluster.Spec.Instances = newClusterSize
+ err = crudClient.Patch(ctx, cluster, client.MergeFrom(originalCluster))
+ if err != nil {
+ return err
+ }
+ return nil
+}
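+
+// Usage sketch (illustrative only): fetching a cluster and scaling it up by
+// one instance from an e2e test; ctx and crudClient are assumed to come from
+// the shared testing environment.
+//
+//	cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+//	if err == nil {
+//		err = clusterutils.ScaleSize(ctx, crudClient, namespace, clusterName,
+//			cluster.Spec.Instances+1)
+//	}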
diff --git a/tests/utils/commons.go b/tests/utils/commons.go
deleted file mode 100644
index d3c77a667e..0000000000
--- a/tests/utils/commons.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "fmt"
- "time"
-
- "github.com/avast/retry-go/v4"
- apierrs "k8s.io/apimachinery/pkg/api/errors"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// ForgeArchiveWalOnMinio instead of using `switchWalCmd` to generate a real WAL archive, directly forges a WAL archive
-// file on Minio by copying and renaming an existing WAL archive file for the sake of more control of testing. To make
-// sure the forged one won't be a real WAL archive, we let the sequence in newWALName to be big enough so that it can't
-// be a real WAL archive name in an idle postgresql.
-func ForgeArchiveWalOnMinio(namespace, clusterName, miniClientPodName, existingWALName, newWALName string) error {
- // Forge a WAL archive by copying and renaming the 1st WAL archive
- minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000"
- existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz"
- newWALNamePath := minioWALBasePath + "/" + newWALName
- forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath
- _, _, err := RunUncheckedRetry(fmt.Sprintf(
- "kubectl exec -n %v %v -- %v",
- namespace,
- miniClientPodName,
- forgeWALOnMinioCmd))
-
- return err
-}
-
-// TestFileExist tests if a file specified with `fileName` exist under directory `directoryPath`, on pod `podName` in
-// namespace `namespace`
-func TestFileExist(namespace, podName, directoryPath, fileName string) bool {
- filePath := directoryPath + "/" + fileName
- testFileExistCommand := "test -f " + filePath
- _, _, err := RunUnchecked(fmt.Sprintf(
- "kubectl exec -n %v %v -- %v",
- namespace,
- podName,
- testFileExistCommand))
-
- return err == nil
-}
-
-// TestDirectoryEmpty tests if a directory `directoryPath` exists on pod `podName` in namespace `namespace`
-func TestDirectoryEmpty(namespace, podName, directoryPath string) bool {
- testDirectoryEmptyCommand := "test \"$(ls -A" + directoryPath + ")\""
- _, _, err := RunUnchecked(fmt.Sprintf(
- "kubectl exec -n %v %v -- %v",
- namespace,
- podName,
- testDirectoryEmptyCommand))
-
- return err == nil
-}
-
-// CreateObject create object in the Kubernetes cluster
-func CreateObject(env *TestingEnvironment, object client.Object, opts ...client.CreateOption) (client.Object, error) {
- err := retry.Do(
- func() error {
- return env.Client.Create(env.Ctx, object, opts...)
- },
- retry.Delay(PollingTime*time.Second),
- retry.Attempts(RetryAttempts),
- retry.DelayType(retry.FixedDelay),
- retry.RetryIf(func(err error) bool { return !apierrs.IsAlreadyExists(err) }),
- )
- return object, err
-}
-
-// DeleteObject delete object in the Kubernetes cluster
-func DeleteObject(env *TestingEnvironment, object client.Object, opts ...client.DeleteOption) error {
- err := retry.Do(
- func() error {
- return env.Client.Delete(env.Ctx, object, opts...)
- },
- retry.Delay(PollingTime*time.Second),
- retry.Attempts(RetryAttempts),
- retry.DelayType(retry.FixedDelay),
- retry.RetryIf(func(err error) bool { return !apierrs.IsNotFound(err) }),
- )
- return err
-}
-
-// GetObjectList retrieves list of objects for a given namespace and list options
-func GetObjectList(env *TestingEnvironment, objectList client.ObjectList, opts ...client.ListOption) error {
- err := retry.Do(
- func() error {
- err := env.Client.List(env.Ctx, objectList, opts...)
- if err != nil {
- return err
- }
- return nil
- },
- retry.Delay(PollingTime*time.Second),
- retry.Attempts(RetryAttempts),
- retry.DelayType(retry.FixedDelay),
- )
- return err
-}
-
-// GetObject retrieves an objects for the given object key from the Kubernetes Cluster
-func GetObject(env *TestingEnvironment, objectKey client.ObjectKey, object client.Object) error {
- err := retry.Do(
- func() error {
- err := env.Client.Get(env.Ctx, objectKey, object)
- if err != nil {
- return err
- }
- return nil
- },
- retry.Delay(PollingTime*time.Second),
- retry.Attempts(RetryAttempts),
- retry.DelayType(retry.FixedDelay),
- )
- return err
-}
diff --git a/tests/utils/deployment.go b/tests/utils/deployments/deployment.go
similarity index 72%
rename from tests/utils/deployment.go
rename to tests/utils/deployments/deployment.go
index 31995afc7d..0f9c409136 100644
--- a/tests/utils/deployment.go
+++ b/tests/utils/deployments/deployment.go
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package deployments contains functions to control deployments
+package deployments
import (
+ "context"
"fmt"
"time"
@@ -25,22 +27,27 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-// DeploymentIsReady checks if a Deployment is ready
-func DeploymentIsReady(deployment appsv1.Deployment) bool {
+// isReady checks if a Deployment is ready
+func isReady(deployment appsv1.Deployment) bool {
return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas
}
-// DeploymentWaitForReady waits for a Deployment to be ready
-func DeploymentWaitForReady(env *TestingEnvironment, deployment *appsv1.Deployment, timeoutSeconds uint) error {
+// WaitForReady waits for a Deployment to be ready
+func WaitForReady(
+ ctx context.Context,
+ crudClient client.Client,
+ deployment *appsv1.Deployment,
+ timeoutSeconds uint,
+) error {
err := retry.Do(
func() error {
- if err := env.Client.Get(env.Ctx, client.ObjectKey{
+ if err := crudClient.Get(ctx, client.ObjectKey{
Namespace: deployment.Namespace,
Name: deployment.Name,
}, deployment); err != nil {
return err
}
- if !DeploymentIsReady(*deployment) {
+ if !isReady(*deployment) {
return fmt.Errorf(
"deployment not ready. Namespace: %v, Name: %v",
deployment.Namespace,
diff --git a/tests/utils/doc.go b/tests/utils/doc.go
new file mode 100644
index 0000000000..72e13d50e2
--- /dev/null
+++ b/tests/utils/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package utils contains helper functions/methods for the e2e test suite
+package utils
diff --git a/tests/utils/environment/doc.go b/tests/utils/environment/doc.go
new file mode 100644
index 0000000000..5c0dbc857f
--- /dev/null
+++ b/tests/utils/environment/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package environment contains functions to handle the TestingEnvironment struct
+package environment
diff --git a/tests/utils/environment.go b/tests/utils/environment/environment.go
similarity index 50%
rename from tests/utils/environment.go
rename to tests/utils/environment/environment.go
index b93ea595d8..4a216075be 100644
--- a/tests/utils/environment.go
+++ b/tests/utils/environment/environment.go
@@ -14,14 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package environment
import (
"context"
"fmt"
"os"
- "path/filepath"
- "strings"
"sync"
"time"
@@ -32,14 +30,9 @@ import (
storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/thoas/go-funk"
- batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
- eventsv1 "k8s.io/api/events/v1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
- "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/utils/strings/slices"
@@ -47,9 +40,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
// Import the client auth plugin package to allow use gke or ake to run tests
_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -60,12 +54,6 @@ import (
const (
// RetryTimeout retry timeout (in seconds) when a client api call or kubectl cli request get failed
RetryTimeout = 60
- // RetryAttempts maximum number of attempts when it fails in `retry`. Mainly used in `RunUncheckedRetry`
- RetryAttempts = 5
- // PollingTime polling interval (in seconds) between retries
- PollingTime = 5
- // sternLogDirectory contains the fixed path to store the cluster logs
- sternLogDirectory = "cluster_logs/"
)
// TestingEnvironment struct for operator testing
@@ -76,12 +64,9 @@ type TestingEnvironment struct {
APIExtensionClient apiextensionsclientset.Interface
Ctx context.Context
Scheme *runtime.Scheme
- PreserveNamespaces []string
Log logr.Logger
PostgresVersion uint64
createdNamespaces *uniqueStringSlice
- AzureConfiguration AzureConfiguration
- SternLogDir string
}
type uniqueStringSlice struct {
@@ -111,7 +96,6 @@ func NewTestingEnvironment() (*TestingEnvironment, error) {
env.APIExtensionClient = apiextensionsclientset.NewForConfigOrDie(env.RestClientConfig)
env.Ctx = context.Background()
env.Scheme = runtime.NewScheme()
- env.SternLogDir = sternLogDirectory
if err := storagesnapshotv1.AddToScheme(env.Scheme); err != nil {
return nil, err
@@ -149,10 +133,6 @@ func NewTestingEnvironment() (*TestingEnvironment, error) {
return nil, err
}
- if preserveNamespaces := os.Getenv("PRESERVE_NAMESPACES"); preserveNamespaces != "" {
- env.PreserveNamespaces = strings.Fields(preserveNamespaces)
- }
-
clientDiscovery, err := utils.GetDiscoveryClient()
if err != nil {
return nil, fmt.Errorf("could not get the discovery client: %w", err)
@@ -163,8 +143,6 @@ func NewTestingEnvironment() (*TestingEnvironment, error) {
return nil, fmt.Errorf("could not detect SeccompProfile support: %w", err)
}
- env.AzureConfiguration = newAzureConfigurationFromEnv()
-
return &env, nil
}
@@ -186,115 +164,20 @@ func (env TestingEnvironment) EventuallyExecCommand(
return err
}
return nil
- }, RetryTimeout, PollingTime).Should(Succeed())
+ }, RetryTimeout, objects.PollingTime).Should(Succeed())
return stdOut, stdErr, err
}
-// ExecCommand wraps the utils.ExecCommand pre-setting values constant during
-// tests
-func (env TestingEnvironment) ExecCommand(
+// CreateUniqueTestNamespace creates a namespace by using the passed prefix.
+// It returns the namespace name and any error encountered.
+// The namespace is automatically cleaned up at the end of the test.
+func (env TestingEnvironment) CreateUniqueTestNamespace(
ctx context.Context,
- pod corev1.Pod,
- containerName string,
- timeout *time.Duration,
- command ...string,
-) (string, string, error) {
- return utils.ExecCommand(ctx, env.Interface, env.RestClientConfig,
- pod, containerName, timeout, command...)
-}
-
-// GetPVCList gathers the current list of PVCs in a namespace
-func (env TestingEnvironment) GetPVCList(namespace string) (*corev1.PersistentVolumeClaimList, error) {
- pvcList := &corev1.PersistentVolumeClaimList{}
- err := env.Client.List(
- env.Ctx, pvcList, client.InNamespace(namespace),
- )
- return pvcList, err
-}
-
-// GetSnapshotList gathers the current list of VolumeSnapshots in a namespace
-func (env TestingEnvironment) GetSnapshotList(namespace string) (*storagesnapshotv1.VolumeSnapshotList, error) {
- list := &storagesnapshotv1.VolumeSnapshotList{}
- err := env.Client.List(env.Ctx, list, client.InNamespace(namespace))
-
- return list, err
-}
-
-// GetJobList gathers the current list of jobs in a namespace
-func (env TestingEnvironment) GetJobList(namespace string) (*batchv1.JobList, error) {
- jobList := &batchv1.JobList{}
- err := env.Client.List(
- env.Ctx, jobList, client.InNamespace(namespace),
- )
- return jobList, err
-}
-
-// GetServiceAccountList gathers the current list of jobs in a namespace
-func (env TestingEnvironment) GetServiceAccountList(namespace string) (*corev1.ServiceAccountList, error) {
- serviceAccountList := &corev1.ServiceAccountList{}
- err := env.Client.List(
- env.Ctx, serviceAccountList, client.InNamespace(namespace),
- )
- return serviceAccountList, err
-}
+ crudClient client.Client,
+ namespacePrefix string,
+ opts ...client.CreateOption,
+) (string, error) {
+ name := env.createdNamespaces.generateUniqueName(namespacePrefix)
-// GetEventList gathers the current list of events in a namespace
-func (env TestingEnvironment) GetEventList(namespace string) (*eventsv1.EventList, error) {
- eventList := &eventsv1.EventList{}
- err := env.Client.List(
- env.Ctx, eventList, client.InNamespace(namespace),
- )
- return eventList, err
-}
-
-// GetNodeList gathers the current list of Nodes
-func (env TestingEnvironment) GetNodeList() (*corev1.NodeList, error) {
- nodeList := &corev1.NodeList{}
- err := env.Client.List(env.Ctx, nodeList, client.InNamespace(""))
- return nodeList, err
-}
-
-// GetBackupList gathers the current list of backup in namespace
-func (env TestingEnvironment) GetBackupList(namespace string) (*apiv1.BackupList, error) {
- backupList := &apiv1.BackupList{}
- err := env.Client.List(
- env.Ctx, backupList, client.InNamespace(namespace),
- )
- return backupList, err
-}
-
-// GetScheduledBackupList gathers the current list of scheduledBackup in namespace
-func (env TestingEnvironment) GetScheduledBackupList(namespace string) (*apiv1.ScheduledBackupList, error) {
- scheduledBackupList := &apiv1.ScheduledBackupList{}
- err := env.Client.List(
- env.Ctx, scheduledBackupList, client.InNamespace(namespace),
- )
- return scheduledBackupList, err
-}
-
-// GetResourceNamespacedNameFromYAML returns the NamespacedName representing a resource in a YAML file
-func (env TestingEnvironment) GetResourceNamespacedNameFromYAML(path string) (types.NamespacedName, error) {
- data, err := os.ReadFile(filepath.Clean(path))
- if err != nil {
- return types.NamespacedName{}, err
- }
- decoder := serializer.NewCodecFactory(env.Scheme).UniversalDeserializer()
- obj, _, err := decoder.Decode(data, nil, nil)
- if err != nil {
- return types.NamespacedName{}, err
- }
- objectMeta, err := meta.Accessor(obj)
- if err != nil {
- return types.NamespacedName{}, err
- }
- return types.NamespacedName{Namespace: objectMeta.GetNamespace(), Name: objectMeta.GetName()}, nil
-}
-
-// GetResourceNameFromYAML returns the name of a resource in a YAML file
-func (env TestingEnvironment) GetResourceNameFromYAML(path string) (string, error) {
- namespacedName, err := env.GetResourceNamespacedNameFromYAML(path)
- if err != nil {
- return "", err
- }
- return namespacedName.Name, err
+ return name, namespaces.CreateTestNamespace(ctx, crudClient, name, opts...)
}
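+
+// Usage sketch (illustrative only): creating a disposable namespace at the
+// beginning of an e2e test; the prefix below is an example.
+//
+//	namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, "fencing-e2e")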
diff --git a/tests/utils/namespace_test.go b/tests/utils/environment/environment_test.go
similarity index 98%
rename from tests/utils/namespace_test.go
rename to tests/utils/environment/environment_test.go
index 2919a8f915..914e8386a9 100644
--- a/tests/utils/namespace_test.go
+++ b/tests/utils/environment/environment_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package environment
import (
. "github.com/onsi/ginkgo/v2"
diff --git a/tests/utils/job.go b/tests/utils/environment/suite_test.go
similarity index 52%
rename from tests/utils/job.go
rename to tests/utils/environment/suite_test.go
index a9ae454301..61c876f728 100644
--- a/tests/utils/job.go
+++ b/tests/utils/environment/suite_test.go
@@ -14,28 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package environment
import (
- "errors"
- "fmt"
+ "testing"
- batchv1 "k8s.io/api/batch/v1"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
)
-// GetJob gets a Job by namespace and name
-func (env TestingEnvironment) GetJob(namespace, jobName string) (*batchv1.Job, error) {
- wrapErr := func(err error) error {
- return fmt.Errorf("while getting job '%s/%s': %w", namespace, jobName, err)
- }
- jobList, err := env.GetJobList(namespace)
- if err != nil {
- return nil, wrapErr(err)
- }
- for _, job := range jobList.Items {
- if jobName == job.Name {
- return &job, nil
- }
- }
- return nil, wrapErr(errors.New("job not found"))
+func TestUtils(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Utils test environment suite")
}
diff --git a/tests/utils/envsubst/doc.go b/tests/utils/envsubst/doc.go
new file mode 100644
index 0000000000..a7d2676b31
--- /dev/null
+++ b/tests/utils/envsubst/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package envsubst manages the replacement of environment variables in a file
+package envsubst
diff --git a/tests/utils/envsubst.go b/tests/utils/envsubst/envsubst.go
similarity index 99%
rename from tests/utils/envsubst.go
rename to tests/utils/envsubst/envsubst.go
index 74e23f77a8..c4290b38b5 100644
--- a/tests/utils/envsubst.go
+++ b/tests/utils/envsubst/envsubst.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package envsubst
import (
"bytes"
diff --git a/tests/utils/envsubst_test.go b/tests/utils/envsubst/envsubst_test.go
similarity index 99%
rename from tests/utils/envsubst_test.go
rename to tests/utils/envsubst/envsubst_test.go
index 1557a2dccf..da5df755e0 100644
--- a/tests/utils/envsubst_test.go
+++ b/tests/utils/envsubst/envsubst_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package envsubst
import (
"errors"
diff --git a/tests/utils/lease.go b/tests/utils/envsubst/suite_test.go
similarity index 51%
rename from tests/utils/lease.go
rename to tests/utils/envsubst/suite_test.go
index a74de844be..4ac5c2b4c5 100644
--- a/tests/utils/lease.go
+++ b/tests/utils/envsubst/suite_test.go
@@ -14,20 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package envsubst
import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "testing"
- "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
)
-// GetLeaderInfoFromLease gathers leader holderIdentity from the lease
-func GetLeaderInfoFromLease(operatorNamespace string, env *TestingEnvironment) (string, error) {
- leaseInterface := env.Interface.CoordinationV1().Leases(operatorNamespace)
- lease, err := leaseInterface.Get(env.Ctx, controller.LeaderElectionID, metav1.GetOptions{})
- if err != nil {
- return "", err
- }
- return *lease.Spec.HolderIdentity, nil
+func TestUtils(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Utils test envsubst suite")
}
diff --git a/tests/utils/exec/exec.go b/tests/utils/exec/exec.go
new file mode 100644
index 0000000000..58ca134ca6
--- /dev/null
+++ b/tests/utils/exec/exec.go
@@ -0,0 +1,156 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package exec provides functions to execute commands inside pods or locally
+package exec
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+ pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+
+ . "github.com/onsi/gomega" // nolint
+)
+
+// ContainerLocator contains the necessary data to find a container on a pod
+type ContainerLocator struct {
+ Namespace string
+ PodName string
+ ContainerName string
+}
+
+// CommandInContainer executes a command in the container identified by the
+// given ContainerLocator
+func CommandInContainer(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ container ContainerLocator,
+ timeout *time.Duration,
+ command ...string,
+) (string, string, error) {
+ wrapErr := func(err error) error {
+ return fmt.Errorf("while executing command in pod '%s/%s': %w",
+ container.Namespace, container.PodName, err)
+ }
+ pod, err := pods.Get(ctx, crudClient, container.Namespace, container.PodName)
+ if err != nil {
+ return "", "", wrapErr(err)
+ }
+ if !pkgutils.IsPodReady(*pod) {
+ return "", "", fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name)
+ }
+ return Command(ctx, kubeInterface, restConfig, *pod, container.ContainerName, timeout, command...)
+}
+
+// PodLocator contains the necessary data to find a pod
+type PodLocator struct {
+ Namespace string
+ PodName string
+}
+
+// CommandInInstancePod executes commands in a given instance pod, in the
+// postgres container
+func CommandInInstancePod(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ podLocator PodLocator,
+ timeout *time.Duration,
+ command ...string,
+) (string, string, error) {
+ return CommandInContainer(
+ ctx, crudClient, kubeInterface, restConfig,
+ ContainerLocator{
+ Namespace: podLocator.Namespace,
+ PodName: podLocator.PodName,
+ ContainerName: specs.PostgresContainerName,
+ }, timeout, command...)
+}
+
+// DatabaseName is a special type for the database argument in an Exec call
+type DatabaseName string
+
+// QueryInInstancePod executes a query in an instance pod, by connecting to the pod
+// and the postgres container, and using a local connection with the postgres user
+func QueryInInstancePod(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ podLocator PodLocator,
+ dbname DatabaseName,
+ query string,
+) (string, string, error) {
+ timeout := time.Second * 10
+ return CommandInInstancePod(
+ ctx, crudClient, kubeInterface, restConfig,
+ PodLocator{
+ Namespace: podLocator.Namespace,
+ PodName: podLocator.PodName,
+ }, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query)
+}
+
+// EventuallyExecQueryInInstancePod wraps QueryInInstancePod with an Eventually clause
+func EventuallyExecQueryInInstancePod(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ podLocator PodLocator,
+ dbname DatabaseName,
+ query string,
+ retryTimeout int,
+ pollingTime int,
+) (string, string, error) {
+ var stdOut, stdErr string
+ var err error
+
+ Eventually(func() error {
+ stdOut, stdErr, err = QueryInInstancePod(
+ ctx, crudClient, kubeInterface, restConfig,
+ podLocator, dbname, query)
+ return err
+ }, retryTimeout, pollingTime).Should(Succeed())
+
+ return stdOut, stdErr, err
+}
+
+// Command wraps utils.ExecCommand, pre-setting the values that stay
+// constant during tests
+func Command(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ pod v1.Pod,
+ containerName string,
+ timeout *time.Duration,
+ command ...string,
+) (string, string, error) {
+ return pkgutils.ExecCommand(ctx, kubeInterface, restConfig,
+ pod, containerName, timeout, command...)
+}
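
For illustration, a minimal usage sketch of the new exec helpers from a Ginkgo spec. The env fields (Ctx, Client, Interface, RestClientConfig) come from the shared TestingEnvironment, while namespace and podName are placeholders:

    // Run a query on the instance through a local psql connection;
    // "app" is an assumed database name.
    stdout, stderr, err := exec.QueryInInstancePod(
        env.Ctx, env.Client, env.Interface, env.RestClientConfig,
        exec.PodLocator{Namespace: namespace, PodName: podName},
        exec.DatabaseName("app"),
        "SELECT count(*) FROM pg_stat_activity",
    )
    Expect(err).ToNot(HaveOccurred(), "stdout: %s, stderr: %s", stdout, stderr)
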
diff --git a/tests/utils/fence.go b/tests/utils/fencing/fencing.go
similarity index 61%
rename from tests/utils/fence.go
rename to tests/utils/fencing/fencing.go
index bbbc52491d..f48ffaf420 100644
--- a/tests/utils/fence.go
+++ b/tests/utils/fencing/fencing.go
@@ -14,47 +14,52 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package fencing provides functions to manage fencing on CNPG clusters
+package fencing
import (
+ "context"
"fmt"
"k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
)
-// FencingMethod will be one of the supported ways to trigger an instance fencing
-type FencingMethod string
+// Method is one of the supported ways to trigger instance fencing
+type Method string
const (
// UsingAnnotation is the keyword to fence instances on/off through the annotation method
- UsingAnnotation FencingMethod = "annotation"
+ UsingAnnotation Method = "annotation"
// UsingPlugin is the keyword to fence instances on/off through the plugin method
- UsingPlugin FencingMethod = "plugin"
+ UsingPlugin Method = "plugin"
)
-// FencingOn marks an instance in a cluster as fenced
-func FencingOn(
- env *TestingEnvironment,
+// On marks an instance in a cluster as fenced
+func On(
+ ctx context.Context,
+ crudClient client.Client,
serverName,
namespace,
clusterName string,
- fencingMethod FencingMethod,
+ fencingMethod Method,
) error {
switch fencingMethod {
case UsingPlugin:
- _, _, err := Run(fmt.Sprintf("kubectl cnpg fencing on %v %v -n %v",
+ _, _, err := run.Run(fmt.Sprintf("kubectl cnpg fencing on %v %v -n %v",
clusterName, serverName, namespace))
if err != nil {
return err
}
case UsingAnnotation:
- err := utils.NewFencingMetadataExecutor(env.Client).
+ err := utils.NewFencingMetadataExecutor(crudClient).
AddFencing().
ForInstance(serverName).
- Execute(env.Ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{})
+ Execute(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{})
if err != nil {
return err
}
@@ -64,26 +69,27 @@ func FencingOn(
return nil
}
-// FencingOff marks an instance in a cluster as not fenced
-func FencingOff(
- env *TestingEnvironment,
+// Off marks an instance in a cluster as not fenced
+func Off(
+ ctx context.Context,
+ crudClient client.Client,
serverName,
namespace,
clusterName string,
- fencingMethod FencingMethod,
+ fencingMethod Method,
) error {
switch fencingMethod {
case UsingPlugin:
- _, _, err := Run(fmt.Sprintf("kubectl cnpg fencing off %v %v -n %v",
+ _, _, err := run.Run(fmt.Sprintf("kubectl cnpg fencing off %v %v -n %v",
clusterName, serverName, namespace))
if err != nil {
return err
}
case UsingAnnotation:
- err := utils.NewFencingMetadataExecutor(env.Client).
+ err := utils.NewFencingMetadataExecutor(crudClient).
RemoveFencing().
ForInstance(serverName).
- Execute(env.Ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{})
+ Execute(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{})
if err != nil {
return err
}
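
A sketch of the resulting call sites after the rename; instanceName, namespace and clusterName are placeholders:

    // Fence a single instance through the annotation method...
    err := fencing.On(env.Ctx, env.Client, instanceName, namespace, clusterName, fencing.UsingAnnotation)
    Expect(err).ToNot(HaveOccurred())

    // ...and unfence it once the assertions on the fenced state are done.
    err = fencing.Off(env.Ctx, env.Client, instanceName, namespace, clusterName, fencing.UsingAnnotation)
    Expect(err).ToNot(HaveOccurred())
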
diff --git a/tests/utils/hibernate.go b/tests/utils/hibernate.go
deleted file mode 100644
index 3faee5a5a6..0000000000
--- a/tests/utils/hibernate.go
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "context"
- "fmt"
-
- ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
-)
-
-// HibernationMethod will be one of the supported ways to trigger an instance fencing
-type HibernationMethod string
-
-const (
- // HibernateDeclaratively it is a keyword to use while fencing on/off the instances using annotation method
- HibernateDeclaratively HibernationMethod = "annotation"
- // HibernateImperatively it is a keyword to use while fencing on/off the instances using plugin method
- HibernateImperatively HibernationMethod = "plugin"
-)
-
-// HibernateOn hibernate on a cluster
-func HibernateOn(
- env *TestingEnvironment,
- namespace,
- clusterName string,
- method HibernationMethod,
-) error {
- switch method {
- case HibernateImperatively:
- _, _, err := Run(fmt.Sprintf("kubectl cnpg hibernate on %v -n %v",
- clusterName, namespace))
- if err != nil {
- return err
- }
- return nil
- case HibernateDeclaratively:
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return err
- }
- if cluster.Annotations == nil {
- cluster.Annotations = make(map[string]string)
- }
- originCluster := cluster.DeepCopy()
- cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn
-
- err = env.Client.Patch(context.Background(), cluster, ctrlclient.MergeFrom(originCluster))
- return err
- default:
- return fmt.Errorf("unknown method: %v", method)
- }
-}
-
-// HibernateOff hibernate off a cluster
-func HibernateOff(
- env *TestingEnvironment,
- namespace,
- clusterName string,
- method HibernationMethod,
-) error {
- switch method {
- case HibernateImperatively:
- _, _, err := Run(fmt.Sprintf("kubectl cnpg hibernate off %v -n %v",
- clusterName, namespace))
- if err != nil {
- return err
- }
- return nil
- case HibernateDeclaratively:
- cluster, err := env.GetCluster(namespace, clusterName)
- if err != nil {
- return err
- }
- if cluster.Annotations == nil {
- cluster.Annotations = make(map[string]string)
- }
- originCluster := cluster.DeepCopy()
- cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOff
-
- err = env.Client.Patch(context.Background(), cluster, ctrlclient.MergeFrom(originCluster))
- return err
- default:
- return fmt.Errorf("unknown method: %v", method)
- }
-}
diff --git a/tests/utils/import_db.go b/tests/utils/importdb/import_db.go
similarity index 84%
rename from tests/utils/import_db.go
rename to tests/utils/importdb/import_db.go
index ccb5e62175..1316a76fab 100644
--- a/tests/utils/import_db.go
+++ b/tests/utils/importdb/import_db.go
@@ -14,17 +14,23 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package importdb contains the functions to import a database
+package importdb
import (
+ "context"
"fmt"
"os"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services"
)
// ImportDatabaseMicroservice creates a cluster, starting from an external cluster
@@ -32,18 +38,19 @@ import (
// NOTE: the application user on the source Cluster needs to be granted
// REPLICATION permissions, which are not set by default
func ImportDatabaseMicroservice(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
sourceClusterName,
importedClusterName,
imageName,
databaseName string,
- env *TestingEnvironment,
) (*apiv1.Cluster, error) {
if imageName == "" {
imageName = os.Getenv("POSTGRES_IMG")
}
storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
- host, err := GetHostName(namespace, sourceClusterName, env)
+ host, err := services.GetHostName(ctx, crudClient, namespace, sourceClusterName)
if err != nil {
return nil, err
}
@@ -82,8 +89,8 @@ func ImportDatabaseMicroservice(
Name: sourceClusterName,
ConnectionParameters: map[string]string{
"host": host,
- "user": AppUser,
- "dbname": AppDBName,
+ "user": postgres.AppUser,
+ "dbname": postgres.AppDBName,
},
Password: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -96,7 +103,7 @@ func ImportDatabaseMicroservice(
},
}
- obj, err := CreateObject(env, restoreCluster)
+ obj, err := objects.Create(ctx, crudClient, restoreCluster)
if err != nil {
return nil, err
}
@@ -112,19 +119,20 @@ func ImportDatabaseMicroservice(
// Imports all the specified `databaseNames` and `roles` from the source cluster
// NOTE: enableSuperuserAccess needs to be enabled
func ImportDatabasesMonolith(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
sourceClusterName,
importedClusterName,
imageName string,
databaseNames []string,
roles []string,
- env *TestingEnvironment,
) (*apiv1.Cluster, error) {
if imageName == "" {
imageName = os.Getenv("POSTGRES_IMG")
}
storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS")
- host, err := GetHostName(namespace, sourceClusterName, env)
+ host, err := services.GetHostName(ctx, crudClient, namespace, sourceClusterName)
if err != nil {
return nil, err
}
@@ -161,8 +169,8 @@ func ImportDatabasesMonolith(
Name: sourceClusterName,
ConnectionParameters: map[string]string{
"host": host,
- "user": PostgresUser,
- "dbname": PostgresDBName,
+ "user": postgres.PostgresUser,
+ "dbname": postgres.PostgresDBName,
},
Password: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -175,7 +183,7 @@ func ImportDatabasesMonolith(
},
}
- obj, err := CreateObject(env, targetCluster)
+ obj, err := objects.Create(ctx, crudClient, targetCluster)
if err != nil {
return nil, err
}
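
The import helpers now take the context and client explicitly instead of the whole environment; a hedged sketch of a microservice-style import, where the cluster names are placeholders:

    // An empty image name falls back to the POSTGRES_IMG environment variable.
    importedCluster, err := importdb.ImportDatabaseMicroservice(
        env.Ctx, env.Client,
        namespace, sourceClusterName, importedClusterName,
        "",    // image name
        "app", // database to import from the source cluster
    )
    Expect(err).ToNot(HaveOccurred())
    Expect(importedCluster.Name).To(Equal(importedClusterName))
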
diff --git a/tests/utils/logs/doc.go b/tests/utils/logs/doc.go
new file mode 100644
index 0000000000..4af5e3c745
--- /dev/null
+++ b/tests/utils/logs/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package logs provides a way to retrieve and parse the logs of a pod
+package logs
diff --git a/tests/utils/logs.go b/tests/utils/logs/logs.go
similarity index 93%
rename from tests/utils/logs.go
rename to tests/utils/logs/logs.go
index 04c7e2318c..f344713426 100644
--- a/tests/utils/logs.go
+++ b/tests/utils/logs/logs.go
@@ -14,21 +14,30 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package logs
import (
+ "context"
"encoding/json"
"fmt"
"slices"
"strings"
"time"
+
+ "k8s.io/client-go/kubernetes"
+
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
)
// ParseJSONLogs returns the pod's logs of a given pod name,
// in the form of a list of JSON entries
-func ParseJSONLogs(namespace string, podName string, env *TestingEnvironment) ([]map[string]interface{}, error) {
+func ParseJSONLogs(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ namespace string, podName string,
+) ([]map[string]interface{}, error) {
// Gather pod logs
- podLogs, err := env.GetPodLogs(namespace, podName)
+ podLogs, err := pods.Logs(ctx, kubeInterface, namespace, podName)
if err != nil {
return nil, err
}
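
A usage sketch for the relocated log parser; the "level" and "msg" keys are assumptions about the JSON log format, used only for illustration:

    entries, err := logs.ParseJSONLogs(env.Ctx, env.Interface, namespace, podName)
    Expect(err).ToNot(HaveOccurred())
    for _, entry := range entries {
        // Each entry is one JSON log line decoded into a map.
        if entry["level"] == "error" {
            GinkgoWriter.Printf("error log: %v\n", entry["msg"])
        }
    }
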
diff --git a/tests/utils/logs_test.go b/tests/utils/logs/logs_test.go
similarity index 99%
rename from tests/utils/logs_test.go
rename to tests/utils/logs/logs_test.go
index d7fd064253..9f951bc06e 100644
--- a/tests/utils/logs_test.go
+++ b/tests/utils/logs/logs_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package logs
import (
"encoding/json"
diff --git a/tests/utils/suite_test.go b/tests/utils/logs/suite_test.go
similarity index 93%
rename from tests/utils/suite_test.go
rename to tests/utils/logs/suite_test.go
index e15d55b783..329766c9e7 100644
--- a/tests/utils/suite_test.go
+++ b/tests/utils/logs/suite_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package logs
import (
"testing"
@@ -25,5 +25,5 @@ import (
func TestUtils(t *testing.T) {
RegisterFailHandler(Fail)
- RunSpecs(t, "Utils test suite")
+ RunSpecs(t, "Utils test logs suite")
}
diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go
index a5f878ed85..27befd0a26 100644
--- a/tests/utils/minio/minio.go
+++ b/tests/utils/minio/minio.go
@@ -39,7 +39,10 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
)
const (
@@ -75,7 +78,7 @@ type TagSet struct {
// installMinio installs minio in a given namespace
func installMinio(
- env *utils.TestingEnvironment,
+ env *environment.TestingEnvironment,
minioSetup Setup,
timeoutSeconds uint,
) error {
@@ -453,7 +456,7 @@ func sslClient(namespace string) corev1.Pod {
}
// Deploy will create a full MinIO deployment defined in the minioEnv variable
-func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) {
+func Deploy(minioEnv *Env, env *environment.TestingEnvironment) (*corev1.Pod, error) {
var err error
minioEnv.CaPair, err = certs.CreateRootCA(minioEnv.Namespace, "minio")
if err != nil {
@@ -461,7 +464,7 @@ func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) {
}
minioEnv.CaSecretObj = *minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName)
- if _, err = utils.CreateObject(env, &minioEnv.CaSecretObj); err != nil {
+ if _, err = objects.Create(env.Ctx, env.Client, &minioEnv.CaSecretObj); err != nil {
return nil, err
}
@@ -488,10 +491,10 @@ func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) {
minioClient := sslClient(minioEnv.Namespace)
- return &minioClient, utils.PodCreateAndWaitForReady(env, &minioClient, 240)
+ return &minioClient, pods.CreateAndWaitForReady(env.Ctx, env.Client, &minioClient, 240)
}
-func (m *Env) getCaSecret(env *utils.TestingEnvironment, namespace string) (*corev1.Secret, error) {
+func (m *Env) getCaSecret(env *environment.TestingEnvironment, namespace string) (*corev1.Secret, error) {
var certSecret corev1.Secret
if err := env.Client.Get(env.Ctx,
types.NamespacedName{
@@ -512,12 +515,12 @@ func (m *Env) getCaSecret(env *utils.TestingEnvironment, namespace string) (*cor
}
// CreateCaSecret creates the certificates required to authenticate against the MinIO service
-func (m *Env) CreateCaSecret(env *utils.TestingEnvironment, namespace string) error {
+func (m *Env) CreateCaSecret(env *environment.TestingEnvironment, namespace string) error {
caSecret, err := m.getCaSecret(env, namespace)
if err != nil {
return err
}
- _, err = utils.CreateObject(env, caSecret)
+ _, err = objects.Create(env.Ctx, env.Client, caSecret)
return err
}
@@ -525,7 +528,7 @@ func (m *Env) CreateCaSecret(env *utils.TestingEnvironment, namespace string) er
// amount of files matching the given `path`
func CountFiles(minioEnv *Env, path string) (value int, err error) {
var stdout string
- stdout, _, err = utils.RunUnchecked(fmt.Sprintf(
+ stdout, _, err = run.Unchecked(fmt.Sprintf(
"kubectl exec -n %v %v -- %v",
minioEnv.Namespace,
minioEnv.Client.Name,
@@ -541,7 +544,7 @@ func CountFiles(minioEnv *Env, path string) (value int, err error) {
// paths matching the given `path`
func ListFiles(minioEnv *Env, path string) (string, error) {
var stdout string
- stdout, _, err := utils.RunUnchecked(fmt.Sprintf(
+ stdout, _, err := run.Unchecked(fmt.Sprintf(
"kubectl exec -n %v %v -- %v",
minioEnv.Namespace,
minioEnv.Client.Name,
@@ -571,7 +574,7 @@ func composeFindCmd(path string, serviceName string) string {
func GetFileTags(minioEnv *Env, path string) (TagSet, error) {
var output TagSet
// Make sure we have a registered backup to access
- out, _, err := utils.RunUncheckedRetry(fmt.Sprintf(
+ out, _, err := run.UncheckedRetry(fmt.Sprintf(
"kubectl exec -n %v %v -- sh -c 'mc find minio --path %v | head -n1'",
minioEnv.Namespace,
minioEnv.Client.Name,
@@ -582,7 +585,7 @@ func GetFileTags(minioEnv *Env, path string) (TagSet, error) {
walFile := strings.Trim(out, "\n")
- stdout, _, err := utils.RunUncheckedRetry(fmt.Sprintf(
+ stdout, _, err := run.UncheckedRetry(fmt.Sprintf(
"kubectl exec -n %v %v -- sh -c 'mc --json tag list %v'",
minioEnv.Namespace,
minioEnv.Client.Name,
@@ -613,7 +616,7 @@ func TestConnectivityUsingBarmanCloudWalArchive(
"barman-cloud-wal-archive --cloud-provider aws-s3 --endpoint-url https://%s:9000 s3://cluster-backups/ %s "+
"000000010000000000000000 --test", postgres.BarmanBackupEndpointCACertificateLocation, id, key,
minioSvcName, clusterName)
- _, _, err := utils.RunUnchecked(fmt.Sprintf(
+ _, _, err := run.Unchecked(fmt.Sprintf(
"kubectl exec -n %v %v -c postgres -- /bin/bash -c \"%v\"",
namespace,
podName,
@@ -627,7 +630,7 @@ func TestConnectivityUsingBarmanCloudWalArchive(
// CleanFiles clean files on minio for a given path
func CleanFiles(minioEnv *Env, path string) (string, error) {
var stdout string
- stdout, _, err := utils.RunUnchecked(fmt.Sprintf(
+ stdout, _, err := run.Unchecked(fmt.Sprintf(
"kubectl exec -n %v %v -- %v",
minioEnv.Namespace,
minioEnv.Client.Name,
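
The MinIO helpers keep their signatures and only swap the run utilities underneath; a typical polling call looks like this, with path a placeholder glob understood by mc find:

    // Wait until at least one file matching the path lands in the bucket.
    Eventually(func() (int, error) {
        return minio.CountFiles(minioEnv, path)
    }, 120).Should(BeNumerically(">", 0))
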
diff --git a/tests/utils/namespace.go b/tests/utils/namespace.go
deleted file mode 100644
index 65a9513278..0000000000
--- a/tests/utils/namespace.go
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "path"
- "strings"
- "time"
-
- "github.com/cloudnative-pg/machinery/pkg/fileutils"
- "github.com/onsi/ginkgo/v2"
- corev1 "k8s.io/api/core/v1"
- apierrs "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/wait"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs"
-)
-
-// GetOperatorLogs collects the operator logs
-func (env TestingEnvironment) GetOperatorLogs(buf *bytes.Buffer) error {
- operatorPod, err := env.GetOperatorPod()
- if err != nil {
- return err
- }
-
- streamPodLog := logs.StreamingRequest{
- Pod: &operatorPod,
- Options: &corev1.PodLogOptions{
- Timestamps: false,
- Follow: false,
- },
- Client: env.Interface,
- }
- return streamPodLog.Stream(env.Ctx, buf)
-}
-
-// CleanupNamespace does cleanup duty related to the tear-down of a namespace,
-// and is intended to be called in a DeferCleanup clause
-func (env TestingEnvironment) CleanupNamespace(
- namespace string,
- testName string,
- testFailed bool,
-) error {
- if testFailed {
- env.DumpNamespaceObjects(namespace, "out/"+testName+".log")
- }
-
- if len(namespace) == 0 {
- return fmt.Errorf("namespace is empty")
- }
- exists, _ := fileutils.FileExists(path.Join(env.SternLogDir, namespace))
- if exists && !testFailed {
- err := fileutils.RemoveDirectory(path.Join(env.SternLogDir, namespace))
- if err != nil {
- return err
- }
- }
-
- return env.DeleteNamespace(namespace)
-}
-
-// CreateUniqueTestNamespace creates a namespace by using the passed prefix.
-// Return the namespace name and any errors encountered.
-// The namespace is automatically cleaned up at the end of the test.
-func (env TestingEnvironment) CreateUniqueTestNamespace(
- namespacePrefix string,
- opts ...client.CreateOption,
-) (string, error) {
- name := env.createdNamespaces.generateUniqueName(namespacePrefix)
-
- return name, env.CreateTestNamespace(name, opts...)
-}
-
-// CreateTestNamespace creates a namespace creates a namespace.
-// Prefer CreateUniqueTestNamespace instead, unless you need a
-// specific namespace name. If so, make sure there is no collision
-// potential.
-// The namespace is automatically cleaned up at the end of the test.
-func (env TestingEnvironment) CreateTestNamespace(
- name string,
- opts ...client.CreateOption,
-) error {
- err := env.CreateNamespace(name, opts...)
- if err != nil {
- return err
- }
-
- ginkgo.DeferCleanup(func() error {
- return env.CleanupNamespace(
- name,
- ginkgo.CurrentSpecReport().LeafNodeText,
- ginkgo.CurrentSpecReport().Failed(),
- )
- })
-
- return nil
-}
-
-// CreateNamespace creates a namespace.
-func (env TestingEnvironment) CreateNamespace(name string, opts ...client.CreateOption) error {
- // Exit immediately if the name is empty
- if name == "" {
- return errors.New("cannot create namespace with empty name")
- }
-
- u := &unstructured.Unstructured{}
- u.SetName(name)
- u.SetGroupVersionKind(schema.GroupVersionKind{
- Group: "",
- Version: "v1",
- Kind: "Namespace",
- })
- _, err := CreateObject(&env, u, opts...)
- return err
-}
-
-// EnsureNamespace checks for the presence of a namespace, and if it does not
-// exist, creates it
-func (env TestingEnvironment) EnsureNamespace(namespace string) error {
- var nsList corev1.NamespaceList
- err := GetObjectList(&env, &nsList)
- if err != nil {
- return err
- }
- for _, ns := range nsList.Items {
- if ns.Name == namespace {
- return nil
- }
- }
- return env.CreateNamespace(namespace)
-}
-
-// DeleteNamespace deletes a namespace if existent
-func (env TestingEnvironment) DeleteNamespace(name string, opts ...client.DeleteOption) error {
- // Exit immediately if the name is empty
- if name == "" {
- return errors.New("cannot delete namespace with empty name")
- }
-
- // Exit immediately if the namespace is listed in PreserveNamespaces
- for _, v := range env.PreserveNamespaces {
- if strings.HasPrefix(name, v) {
- return nil
- }
- }
-
- u := &unstructured.Unstructured{}
- u.SetName(name)
- u.SetGroupVersionKind(schema.GroupVersionKind{
- Group: "",
- Version: "v1",
- Kind: "Namespace",
- })
-
- return DeleteObject(&env, u, opts...)
-}
-
-// DeleteNamespaceAndWait deletes a namespace if existent and returns when deletion is completed
-func (env TestingEnvironment) DeleteNamespaceAndWait(name string, timeoutSeconds int) error {
- // Exit immediately if the namespace is listed in PreserveNamespaces
- for _, v := range env.PreserveNamespaces {
- if strings.HasPrefix(name, v) {
- return nil
- }
- }
-
- ctx, cancel := context.WithTimeout(env.Ctx, time.Duration(timeoutSeconds)*time.Second)
- defer cancel()
-
- err := env.DeleteNamespace(name, client.PropagationPolicy("Background"))
- if err != nil {
- return err
- }
-
- pods, err := env.GetPodList(name)
- if err != nil {
- return err
- }
-
- for _, pod := range pods.Items {
- err = env.DeletePod(name, pod.Name, client.GracePeriodSeconds(1), client.PropagationPolicy("Background"))
- if err != nil && !apierrs.IsNotFound(err) {
- return err
- }
- }
-
- return wait.PollUntilContextCancel(ctx, time.Second, true,
- func(ctx context.Context) (bool, error) {
- err := env.Client.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{})
- if apierrs.IsNotFound(err) {
- return true, nil
- }
- return false, err
- },
- )
-}
diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go
new file mode 100644
index 0000000000..a4e27dc91e
--- /dev/null
+++ b/tests/utils/namespaces/namespace.go
@@ -0,0 +1,377 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package namespaces provides utilities to manage namespaces
+package namespaces
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/cloudnative-pg/machinery/pkg/fileutils"
+ "github.com/onsi/ginkgo/v2"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/events/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+)
+
+// SternLogDirectory contains the fixed path to store the cluster logs
+const SternLogDirectory = "cluster_logs/"
+
+func getPreserveNamespaces() []string {
+ var preserveNamespacesList []string
+ _, ok := os.LookupEnv("PRESERVE_NAMESPACES")
+ if ok {
+ preserveNamespacesList = strings.Fields(os.Getenv("PRESERVE_NAMESPACES"))
+ }
+
+ return preserveNamespacesList
+}
+
+// cleanupNamespace does cleanup duty related to the tear-down of a namespace,
+// and is intended to be called in a DeferCleanup clause
+func cleanupNamespace(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, testName string,
+ testFailed bool,
+) error {
+ if testFailed {
+ DumpNamespaceObjects(ctx, crudClient, namespace, "out/"+testName+".log")
+ }
+
+ if len(namespace) == 0 {
+ return fmt.Errorf("namespace is empty")
+ }
+ exists, _ := fileutils.FileExists(path.Join(SternLogDirectory, namespace))
+ if exists && !testFailed {
+ err := fileutils.RemoveDirectory(path.Join(SternLogDirectory, namespace))
+ if err != nil {
+ return err
+ }
+ }
+
+ return deleteNamespace(ctx, crudClient, namespace)
+}
+
+// CreateTestNamespace creates a namespace.
+// Prefer CreateUniqueTestNamespace instead, unless you need a
+// specific namespace name. If so, make sure there is no collision
+// potential.
+// The namespace is automatically cleaned up at the end of the test.
+func CreateTestNamespace(
+ ctx context.Context,
+ crudClient client.Client,
+ name string,
+ opts ...client.CreateOption,
+) error {
+ err := CreateNamespace(ctx, crudClient, name, opts...)
+ if err != nil {
+ return err
+ }
+
+ ginkgo.DeferCleanup(func() error {
+ return cleanupNamespace(
+ ctx,
+ crudClient,
+ name,
+ ginkgo.CurrentSpecReport().LeafNodeText,
+ ginkgo.CurrentSpecReport().Failed(),
+ )
+ })
+
+ return nil
+}
+
+// CreateNamespace creates a namespace.
+func CreateNamespace(
+ ctx context.Context,
+ crudClient client.Client,
+ name string,
+ opts ...client.CreateOption,
+) error {
+ // Exit immediately if the name is empty
+ if name == "" {
+ return errors.New("cannot create namespace with empty name")
+ }
+
+ u := &unstructured.Unstructured{}
+ u.SetName(name)
+ u.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "",
+ Version: "v1",
+ Kind: "Namespace",
+ })
+ _, err := objects.Create(ctx, crudClient, u, opts...)
+ return err
+}
+
+// EnsureNamespace checks for the presence of a namespace, and if it does not
+// exist, creates it
+func EnsureNamespace(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) error {
+ var nsList corev1.NamespaceList
+ err := objects.List(ctx, crudClient, &nsList)
+ if err != nil {
+ return err
+ }
+ for _, ns := range nsList.Items {
+ if ns.Name == namespace {
+ return nil
+ }
+ }
+ return CreateNamespace(ctx, crudClient, namespace)
+}
+
+// deleteNamespace deletes a namespace if existent
+func deleteNamespace(
+ ctx context.Context,
+ crudClient client.Client,
+ name string,
+ opts ...client.DeleteOption,
+) error {
+ // Exit immediately if the name is empty
+ if name == "" {
+ return errors.New("cannot delete namespace with empty name")
+ }
+
+ // Exit immediately if the namespace is listed in PreserveNamespaces
+ for _, v := range getPreserveNamespaces() {
+ if strings.HasPrefix(name, v) {
+ return nil
+ }
+ }
+
+ u := &unstructured.Unstructured{}
+ u.SetName(name)
+ u.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "",
+ Version: "v1",
+ Kind: "Namespace",
+ })
+
+ return objects.Delete(ctx, crudClient, u, opts...)
+}
+
+// DeleteNamespaceAndWait deletes a namespace if existent and returns when deletion is completed
+func DeleteNamespaceAndWait(
+ ctx context.Context,
+ crudClient client.Client,
+ name string,
+ timeoutSeconds int,
+) error {
+ // Exit immediately if the namespace is listed in PreserveNamespaces
+ for _, v := range getPreserveNamespaces() {
+ if strings.HasPrefix(name, v) {
+ return nil
+ }
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
+ defer cancel()
+
+ err := deleteNamespace(ctx, crudClient, name, client.PropagationPolicy("Background"))
+ if err != nil {
+ return err
+ }
+
+ podList, err := pods.List(ctx, crudClient, name)
+ if err != nil {
+ return err
+ }
+
+ for _, pod := range podList.Items {
+ err = pods.Delete(
+ ctx, crudClient,
+ name, pod.Name,
+ client.GracePeriodSeconds(1), client.PropagationPolicy("Background"),
+ )
+ if err != nil && !apierrs.IsNotFound(err) {
+ return err
+ }
+ }
+
+ return wait.PollUntilContextCancel(ctx, time.Second, true,
+ func(ctx context.Context) (bool, error) {
+ err := crudClient.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{})
+ if apierrs.IsNotFound(err) {
+ return true, nil
+ }
+ return false, err
+ },
+ )
+}
+
+// DumpNamespaceObjects logs the clusters, pods, pvcs etc. found in a namespace as JSON sections
+func DumpNamespaceObjects(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, filename string,
+) {
+ f, err := os.Create(filepath.Clean(filename))
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ defer func() {
+ _ = f.Sync()
+ _ = f.Close()
+ }()
+ w := bufio.NewWriter(f)
+ clusterList := &apiv1.ClusterList{}
+ _ = objects.List(ctx, crudClient, clusterList, client.InNamespace(namespace))
+
+ for _, cluster := range clusterList.Items {
+ out, _ := json.MarshalIndent(cluster, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v cluster\n", namespace, cluster.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+
+ podList, _ := pods.List(ctx, crudClient, namespace)
+ for _, pod := range podList.Items {
+ out, _ := json.MarshalIndent(pod, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+
+ pvcList, _ := storage.GetPVCList(ctx, crudClient, namespace)
+ for _, pvc := range pvcList.Items {
+ out, _ := json.MarshalIndent(pvc, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v PVC\n", namespace, pvc.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+
+ jobList := &batchv1.JobList{}
+ _ = crudClient.List(
+ ctx, jobList, client.InNamespace(namespace),
+ )
+ for _, job := range jobList.Items {
+ out, _ := json.MarshalIndent(job, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v job\n", namespace, job.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+
+ eventList, _ := GetEventList(ctx, crudClient, namespace)
+ out, _ := json.MarshalIndent(eventList.Items, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping events for namespace %v\n", namespace)
+ _, _ = fmt.Fprintln(w, string(out))
+
+ serviceAccountList, _ := GetServiceAccountList(ctx, crudClient, namespace)
+ for _, sa := range serviceAccountList.Items {
+ out, _ := json.MarshalIndent(sa, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v serviceaccount\n", namespace, sa.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+
+ suffixes := []string{"-r", "-rw", "-any"}
+ for _, cluster := range clusterList.Items {
+ for _, suffix := range suffixes {
+ namespacedName := types.NamespacedName{
+ Namespace: namespace,
+ Name: cluster.Name + suffix,
+ }
+ endpoint := &corev1.Endpoints{}
+ _ = crudClient.Get(ctx, namespacedName, endpoint)
+ out, _ := json.MarshalIndent(endpoint, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v endpoint\n", namespace, endpoint.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+ }
+ // dump backup info
+ backupList, _ := backups.List(ctx, crudClient, namespace)
+ // dump backup object info, if any are configured
+ for _, backup := range backupList.Items {
+ out, _ := json.MarshalIndent(backup, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v backup\n", namespace, backup.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+ // dump scheduledbackup info
+ scheduledBackupList, _ := GetScheduledBackupList(ctx, crudClient, namespace)
+ // dump scheduledbackup object info, if any are configured
+ for _, scheduledBackup := range scheduledBackupList.Items {
+ out, _ := json.MarshalIndent(scheduledBackup, "", " ")
+ _, _ = fmt.Fprintf(w, "Dumping %v/%v scheduledbackup\n", namespace, scheduledBackup.Name)
+ _, _ = fmt.Fprintln(w, string(out))
+ }
+
+ err = w.Flush()
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+
+// GetServiceAccountList gathers the current list of service accounts in a namespace
+func GetServiceAccountList(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*corev1.ServiceAccountList, error) {
+ serviceAccountList := &corev1.ServiceAccountList{}
+ err := crudClient.List(
+ ctx, serviceAccountList, client.InNamespace(namespace),
+ )
+ return serviceAccountList, err
+}
+
+// GetEventList gathers the current list of events in a namespace
+func GetEventList(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*v1.EventList, error) {
+ eventList := &v1.EventList{}
+ err := crudClient.List(
+ ctx, eventList, client.InNamespace(namespace),
+ )
+ return eventList, err
+}
+
+// GetScheduledBackupList gathers the current list of scheduled backups in a namespace
+func GetScheduledBackupList(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*apiv1.ScheduledBackupList, error) {
+ scheduledBackupList := &apiv1.ScheduledBackupList{}
+ err := crudClient.List(
+ ctx, scheduledBackupList, client.InNamespace(namespace),
+ )
+ return scheduledBackupList, err
+}
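
A sketch of the intended call pattern for the new package; the namespace name is a placeholder:

    // CreateTestNamespace registers cleanup via DeferCleanup, so most specs
    // only need the create call.
    err := namespaces.CreateTestNamespace(env.Ctx, env.Client, "refactor-smoke")
    Expect(err).ToNot(HaveOccurred())

    // An explicit, bounded teardown remains available when a spec needs it.
    err = namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, "refactor-smoke", 120)
    Expect(err).ToNot(HaveOccurred())
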
diff --git a/tests/utils/nodes/drain.go b/tests/utils/nodes/nodes.go
similarity index 54%
rename from tests/utils/nodes/drain.go
rename to tests/utils/nodes/nodes.go
index dc2ede03c4..95d4bd3122 100644
--- a/tests/utils/nodes/drain.go
+++ b/tests/utils/nodes/nodes.go
@@ -18,31 +18,38 @@ limitations under the License.
package nodes
import (
+ "context"
"fmt"
+ "strings"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ v1 "k8s.io/api/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2" //nolint
. "github.com/onsi/gomega" //nolint
)
-// DrainPrimaryNode drains the node containing the primary pod.
+// DrainPrimary drains the node containing the primary pod.
// It returns the names of the pods that were running on that node
-func DrainPrimaryNode(
+func DrainPrimary(
+ ctx context.Context,
+ crudClient client.Client,
namespace,
clusterName string,
timeoutSeconds int,
- env *utils.TestingEnvironment,
) []string {
var primaryNode string
var podNames []string
By("identifying primary node and draining", func() {
- pod, err := env.GetClusterPrimary(namespace, clusterName)
+ pod, err := clusterutils.GetPrimary(ctx, crudClient, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
primaryNode = pod.Spec.NodeName
// Gather the pods running on this node
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
if pod.Spec.NodeName == primaryNode {
@@ -55,14 +62,14 @@ func DrainPrimaryNode(
Eventually(func() error {
cmd := fmt.Sprintf("kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds",
primaryNode, timeoutSeconds)
- stdout, stderr, err = utils.RunUnchecked(cmd)
+ stdout, stderr, err = run.Unchecked(cmd)
return err
}, timeoutSeconds).ShouldNot(HaveOccurred(), fmt.Sprintf("stdout: %s, stderr: %s", stdout, stderr))
})
By("ensuring no cluster pod is still running on the drained node", func() {
Eventually(func() ([]string, error) {
var usedNodes []string
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName)
for _, pod := range podList.Items {
usedNodes = append(usedNodes, pod.Spec.NodeName)
}
@@ -73,18 +80,52 @@ func DrainPrimaryNode(
return podNames
}
-// UncordonAllNodes executes the 'kubectl uncordon' command on each node of the list
-func UncordonAllNodes(env *utils.TestingEnvironment) error {
- nodeList, err := env.GetNodeList()
+// UncordonAll executes the 'kubectl uncordon' command on each node of the cluster
+func UncordonAll(
+ ctx context.Context,
+ crudClient client.Client,
+) error {
+ nodeList, err := List(ctx, crudClient)
if err != nil {
return err
}
for _, node := range nodeList.Items {
command := fmt.Sprintf("kubectl uncordon %v", node.Name)
- _, _, err = utils.Run(command)
+ _, _, err = run.Run(command)
if err != nil {
return err
}
}
return nil
}
+
+// List gathers the current list of Nodes
+func List(
+ ctx context.Context,
+ crudClient client.Client,
+) (*v1.NodeList, error) {
+ nodeList := &v1.NodeList{}
+ err := crudClient.List(ctx, nodeList, client.InNamespace(""))
+ return nodeList, err
+}
+
+// DescribeKubernetesNodes returns the `kubectl describe node` output for
+// each node in the Kubernetes cluster
+func DescribeKubernetesNodes(ctx context.Context, crudClient client.Client) (string, error) {
+ nodeList, err := List(ctx, crudClient)
+ if err != nil {
+ return "", err
+ }
+ var report strings.Builder
+ for _, node := range nodeList.Items {
+ command := fmt.Sprintf("kubectl describe node %v", node.Name)
+ stdout, _, err := run.Run(command)
+ if err != nil {
+ return "", err
+ }
+ report.WriteString("================================================\n")
+ report.WriteString(stdout)
+ report.WriteString("================================================\n")
+ }
+ return report.String(), nil
+}
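
Illustrative usage of the renamed node helpers; the 900-second timeout is arbitrary:

    // Drain the node hosting the primary, then bring every node back online.
    drainedPods := nodes.DrainPrimary(env.Ctx, env.Client, namespace, clusterName, 900)
    GinkgoWriter.Printf("pods evicted from the primary node: %v\n", drainedPods)

    err := nodes.UncordonAll(env.Ctx, env.Client)
    Expect(err).ToNot(HaveOccurred())
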
diff --git a/tests/utils/objects/objects.go b/tests/utils/objects/objects.go
new file mode 100644
index 0000000000..af956106be
--- /dev/null
+++ b/tests/utils/objects/objects.go
@@ -0,0 +1,117 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package objects provides generic functions to manage Kubernetes objects
+package objects
+
+import (
+ "context"
+ "time"
+
+ "github.com/avast/retry-go/v4"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+ // RetryAttempts is the maximum number of attempts made when a client call fails and is retried
+ RetryAttempts = 5
+
+ // PollingTime is the polling interval (in seconds) between retries
+ PollingTime = 5
+)
+
+// Create creates an object in the Kubernetes cluster
+func Create(
+ ctx context.Context,
+ crudClient client.Client,
+ object client.Object,
+ opts ...client.CreateOption,
+) (client.Object, error) {
+ err := retry.Do(
+ func() error {
+ return crudClient.Create(ctx, object, opts...)
+ },
+ retry.Delay(PollingTime*time.Second),
+ retry.Attempts(RetryAttempts),
+ retry.DelayType(retry.FixedDelay),
+ retry.RetryIf(func(err error) bool { return !errors.IsAlreadyExists(err) }),
+ )
+ return object, err
+}
+
+// Delete deletes an object in the Kubernetes cluster
+func Delete(
+ ctx context.Context,
+ crudClient client.Client,
+ object client.Object,
+ opts ...client.DeleteOption,
+) error {
+ err := retry.Do(
+ func() error {
+ return crudClient.Delete(ctx, object, opts...)
+ },
+ retry.Delay(PollingTime*time.Second),
+ retry.Attempts(RetryAttempts),
+ retry.DelayType(retry.FixedDelay),
+ retry.RetryIf(func(err error) bool { return !errors.IsNotFound(err) }),
+ )
+ return err
+}
+
+// List retrieves a list of objects
+func List(
+ ctx context.Context,
+ crudClient client.Client,
+ objectList client.ObjectList,
+ opts ...client.ListOption,
+) error {
+ err := retry.Do(
+ func() error {
+ err := crudClient.List(ctx, objectList, opts...)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+ retry.Delay(PollingTime*time.Second),
+ retry.Attempts(RetryAttempts),
+ retry.DelayType(retry.FixedDelay),
+ )
+ return err
+}
+
+// Get retrieves an object for the given object key from the Kubernetes Cluster
+func Get(
+ ctx context.Context,
+ crudClient client.Client,
+ objectKey client.ObjectKey,
+ object client.Object,
+) error {
+ err := retry.Do(
+ func() error {
+ err := crudClient.Get(ctx, objectKey, object)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+ retry.Delay(PollingTime*time.Second),
+ retry.Attempts(RetryAttempts),
+ retry.DelayType(retry.FixedDelay),
+ )
+ return err
+}
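
Every CRUD wrapper above retries with a fixed delay (5 attempts, 5 seconds apart) and treats terminal errors (AlreadyExists on create, NotFound on delete) as non-retryable. A hedged sketch of the call shape, assuming a previously built apiv1.Cluster value:

    // Create tolerates transient API-server errors but not AlreadyExists.
    if _, err := objects.Create(ctx, crudClient, cluster); err != nil {
        return err
    }

    // Get retries transient failures before giving up.
    var fetched apiv1.Cluster
    err := objects.Get(ctx, crudClient,
        client.ObjectKey{Namespace: namespace, Name: clusterName}, &fetched)
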
diff --git a/tests/utils/openshift.go b/tests/utils/openshift/openshift.go
similarity index 70%
rename from tests/utils/openshift.go
rename to tests/utils/openshift/openshift.go
index 769ff8c413..2901962e83 100644
--- a/tests/utils/openshift.go
+++ b/tests/utils/openshift/openshift.go
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package openshift provides functions to work with OLM CRDs
+package openshift
import (
+ "context"
"fmt"
"strings"
@@ -27,14 +29,20 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
+ "k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
- ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
)
-// GetSubscription returns an unstructured subscription object
-func GetSubscription(env *TestingEnvironment) (*unstructured.Unstructured, error) {
+// getSubscription returns an unstructured subscription object
+func getSubscription(
+ ctx context.Context,
+ crudClient client.Client,
+) (*unstructured.Unstructured, error) {
subscription := &unstructured.Unstructured{}
subscription.SetName("cloudnative-pg")
subscription.SetNamespace("openshift-operators")
@@ -43,13 +51,16 @@ func GetSubscription(env *TestingEnvironment) (*unstructured.Unstructured, error
Version: "v1alpha1",
Kind: "Subscription",
})
- err := env.Client.Get(env.Ctx, ctrlclient.ObjectKeyFromObject(subscription), subscription)
+ err := crudClient.Get(ctx, client.ObjectKeyFromObject(subscription), subscription)
return subscription, err
}
// GetSubscriptionVersion retrieves the current ClusterServiceVersion version of the operator
-func GetSubscriptionVersion(env *TestingEnvironment) (string, error) {
- subscription, err := GetSubscription(env)
+func GetSubscriptionVersion(
+ ctx context.Context,
+ crudClient client.Client,
+) (string, error) {
+ subscription, err := getSubscription(ctx, crudClient)
if err != nil {
return "", err
}
@@ -65,17 +76,21 @@ func GetSubscriptionVersion(env *TestingEnvironment) (string, error) {
}
// PatchStatusCondition removes status conditions on a given Cluster
-func PatchStatusCondition(namespace, clusterName string, env *TestingEnvironment) error {
+func PatchStatusCondition(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) error {
cluster := &apiv1.Cluster{}
var err error
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- cluster, err = env.GetCluster(namespace, clusterName)
+ cluster, err = clusterutils.Get(ctx, crudClient, namespace, clusterName)
if err != nil {
return err
}
clusterNoConditions := cluster.DeepCopy()
clusterNoConditions.Status.Conditions = nil
- return env.Client.Patch(env.Ctx, clusterNoConditions, ctrlclient.MergeFrom(cluster))
+ return crudClient.Patch(ctx, clusterNoConditions, client.MergeFrom(cluster))
})
if err != nil {
return err
@@ -84,8 +99,8 @@ func PatchStatusCondition(namespace, clusterName string, env *TestingEnvironment
}
// GetOpenshiftVersion returns the current openshift version
-func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) {
- client, err := dynamic.NewForConfig(env.RestClientConfig)
+func GetOpenshiftVersion(ctx context.Context, restConfig *rest.Config) (semver.Version, error) {
+ client, err := dynamic.NewForConfig(restConfig)
if err != nil {
return semver.Version{}, err
}
@@ -94,7 +109,7 @@ func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) {
Group: "operator.openshift.io",
Version: "v1",
Resource: "openshiftcontrollermanagers",
- }).Get(env.Ctx, "cluster", v1.GetOptions{})
+ }).Get(ctx, "cluster", v1.GetOptions{})
if err != nil {
return semver.Version{}, err
}
@@ -108,7 +123,11 @@ func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) {
}
// CreateSubscription creates a subscription object inside openshift with a fixed name
-func CreateSubscription(env *TestingEnvironment, channel string) error {
+func CreateSubscription(
+ ctx context.Context,
+ crudClient client.Client,
+ channel string,
+) error {
u := &unstructured.Unstructured{}
u.SetName("cloudnative-pg")
u.SetNamespace("openshift-operators")
@@ -131,12 +150,15 @@ func CreateSubscription(env *TestingEnvironment, channel string) error {
return err
}
- _, err = CreateObject(env, u)
+ _, err = objects.Create(ctx, crudClient, u)
return err
}
// DeleteSubscription deletes the operator's subscription object
-func DeleteSubscription(env *TestingEnvironment) error {
+func DeleteSubscription(
+ ctx context.Context,
+ crudClient client.Client,
+) error {
u := &unstructured.Unstructured{}
u.SetName("cloudnative-pg")
u.SetNamespace("openshift-operators")
@@ -146,7 +168,7 @@ func DeleteSubscription(env *TestingEnvironment) error {
Kind: "Subscription",
})
- err := DeleteObject(env, u)
+ err := objects.Delete(ctx, crudClient, u)
if apierrors.IsNotFound(err) {
return nil
}
@@ -155,7 +177,10 @@ func DeleteSubscription(env *TestingEnvironment) error {
}
// DeleteOperatorCRDs deletes the CRDs associated with the operator
-func DeleteOperatorCRDs(env *TestingEnvironment) error {
+func DeleteOperatorCRDs(
+ ctx context.Context,
+ crudClient client.Client,
+) error {
u := &unstructured.Unstructured{}
u.SetName("clusters.postgresql.cnpg.io")
u.SetGroupVersionKind(schema.GroupVersionKind{
@@ -163,22 +188,22 @@ func DeleteOperatorCRDs(env *TestingEnvironment) error {
Version: "v1",
Kind: "CustomResourceDefinition",
})
- err := DeleteObject(env, u)
+ err := objects.Delete(ctx, crudClient, u)
if !apierrors.IsNotFound(err) {
return err
}
u.SetName("backups.postgresql.cnpg.io")
- err = DeleteObject(env, u)
+ err = objects.Delete(ctx, crudClient, u)
if !apierrors.IsNotFound(err) {
return err
}
u.SetName("poolers.postgresql.cnpg.io")
- err = DeleteObject(env, u)
+ err = objects.Delete(ctx, crudClient, u)
if !apierrors.IsNotFound(err) {
return err
}
u.SetName("scheduledbackups.postgresql.cnpg.io")
- err = DeleteObject(env, u)
+ err = objects.Delete(ctx, crudClient, u)
if apierrors.IsNotFound(err) {
return nil
}
@@ -186,7 +211,10 @@ func DeleteOperatorCRDs(env *TestingEnvironment) error {
}
// DeleteCSV will delete all operator's CSVs
-func DeleteCSV(env *TestingEnvironment) error {
+func DeleteCSV(
+ ctx context.Context,
+ crudClient client.Client,
+) error {
ol := &unstructured.UnstructuredList{}
ol.SetGroupVersionKind(schema.GroupVersionKind{
Group: "operators.coreos.com",
@@ -196,12 +224,12 @@ func DeleteCSV(env *TestingEnvironment) error {
labelSelector := labels.SelectorFromSet(map[string]string{
"operators.coreos.com/cloudnative-pg.openshift-operators": "",
})
- err := GetObjectList(env, ol, ctrlclient.MatchingLabelsSelector{Selector: labelSelector})
+ err := objects.List(ctx, crudClient, ol, client.MatchingLabelsSelector{Selector: labelSelector})
if err != nil {
return err
}
for _, o := range ol.Items {
- err = DeleteObject(env, &o)
+ err = objects.Delete(ctx, crudClient, &o)
if err != nil {
if apierrors.IsNotFound(err) {
continue
@@ -213,8 +241,12 @@ func DeleteCSV(env *TestingEnvironment) error {
}
// UpgradeSubscription patches an unstructured subscription object with the target channel
-func UpgradeSubscription(env *TestingEnvironment, channel string) error {
- subscription, err := GetSubscription(env)
+func UpgradeSubscription(
+ ctx context.Context,
+ crudClient client.Client,
+ channel string,
+) error {
+ subscription, err := getSubscription(ctx, crudClient)
if err != nil {
return err
}
@@ -225,5 +257,5 @@ func UpgradeSubscription(env *TestingEnvironment, channel string) error {
return err
}
- return env.Client.Patch(env.Ctx, newSubscription, ctrlclient.MergeFrom(subscription))
+ return crudClient.Patch(ctx, newSubscription, client.MergeFrom(subscription))
}
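
The OpenShift flow becomes explicit about ctx and client as well; a sketch, where the channel name is purely hypothetical:

    currentVersion, err := openshift.GetSubscriptionVersion(env.Ctx, env.Client)
    Expect(err).ToNot(HaveOccurred())
    GinkgoWriter.Printf("CSV version before the upgrade: %s\n", currentVersion)

    // Move the subscription to a newer channel (the name is a placeholder).
    err = openshift.UpgradeSubscription(env.Ctx, env.Client, "stable-v1.24")
    Expect(err).ToNot(HaveOccurred())
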
diff --git a/tests/utils/operator/doc.go b/tests/utils/operator/doc.go
new file mode 100644
index 0000000000..a4e7050ee6
--- /dev/null
+++ b/tests/utils/operator/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package operator provides functions to handle and manage the operator
+package operator
diff --git a/tests/utils/operator.go b/tests/utils/operator/operator.go
similarity index 59%
rename from tests/utils/operator.go
rename to tests/utils/operator/operator.go
index 7ab479b09c..ca705e9e41 100644
--- a/tests/utils/operator.go
+++ b/tests/utils/operator/operator.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package operator
import (
"bufio"
+ "context"
"encoding/json"
"fmt"
"os"
@@ -31,30 +32,40 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
+ "k8s.io/client-go/kubernetes"
"k8s.io/utils/ptr"
- ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
)
-// ReloadOperatorDeployment finds and deletes the operator pod. Returns
+// ReloadDeployment finds and deletes the operator pod. It returns an
// error if the new pod is not ready within a defined timeout
-func ReloadOperatorDeployment(env *TestingEnvironment, timeoutSeconds uint) error {
- operatorPod, err := env.GetOperatorPod()
+func ReloadDeployment(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ timeoutSeconds uint,
+) error {
+ operatorPod, err := GetPod(ctx, crudClient)
if err != nil {
return err
}
zero := int64(0)
- err = env.Client.Delete(env.Ctx, &operatorPod,
- &ctrlclient.DeleteOptions{GracePeriodSeconds: &zero},
+ err = crudClient.Delete(ctx, &operatorPod,
+ &client.DeleteOptions{GracePeriodSeconds: &zero},
)
if err != nil {
return err
}
err = retry.Do(
func() error {
- ready, err := env.IsOperatorReady()
+ ready, err := IsReady(ctx, crudClient, kubeInterface)
if err != nil {
return err
}
@@ -69,8 +80,8 @@ func ReloadOperatorDeployment(env *TestingEnvironment, timeoutSeconds uint) erro
return err
}
-// DumpOperator logs the JSON for the deployment in an operator namespace, its pods and endpoints
-func (env TestingEnvironment) DumpOperator(namespace string, filename string) {
+// Dump logs the JSON for the deployment in an operator namespace, its pods and endpoints
+func Dump(ctx context.Context, crudClient client.Client, namespace, filename string) {
f, err := os.Create(filepath.Clean(filename))
if err != nil {
fmt.Println(err)
@@ -78,12 +89,12 @@ func (env TestingEnvironment) DumpOperator(namespace string, filename string) {
}
w := bufio.NewWriter(f)
- deployment, _ := env.GetOperatorDeployment()
+ deployment, _ := GetDeployment(ctx, crudClient)
out, _ := json.MarshalIndent(deployment, "", " ")
_, _ = fmt.Fprintf(w, "Dumping %v/%v deployment\n", namespace, deployment.Name)
_, _ = fmt.Fprintln(w, string(out))
- podList, _ := env.GetPodList(namespace)
+ podList, _ := pods.List(ctx, crudClient, namespace)
for _, pod := range podList.Items {
out, _ := json.MarshalIndent(pod, "", " ")
_, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name)
@@ -99,11 +110,11 @@ func (env TestingEnvironment) DumpOperator(namespace string, filename string) {
_ = f.Close()
}
-// GetOperatorDeployment returns the operator Deployment if there is a single one running, error otherwise
-func (env TestingEnvironment) GetOperatorDeployment() (appsv1.Deployment, error) {
+// GetDeployment returns the operator Deployment if there is a single one running, error otherwise
+func GetDeployment(ctx context.Context, crudClient client.Client) (appsv1.Deployment, error) {
deploymentList := &appsv1.DeploymentList{}
- if err := GetObjectList(&env, deploymentList,
- ctrlclient.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"},
+ if err := objects.List(ctx, crudClient, deploymentList,
+ client.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"},
); err != nil {
return appsv1.Deployment{}, err
}
@@ -116,10 +127,11 @@ func (env TestingEnvironment) GetOperatorDeployment() (appsv1.Deployment, error)
return deploymentList.Items[0], nil
}
- if err := GetObjectList(
- &env,
+ if err := objects.List(
+ ctx,
+ crudClient,
deploymentList,
- ctrlclient.HasLabels{"operators.coreos.com/cloudnative-pg.openshift-operators"},
+ client.HasLabels{"operators.coreos.com/cloudnative-pg.openshift-operators"},
); err != nil {
return appsv1.Deployment{}, err
}
@@ -136,14 +148,15 @@ func (env TestingEnvironment) GetOperatorDeployment() (appsv1.Deployment, error)
return deploymentList.Items[0], nil
}
-// GetOperatorPod returns the operator pod if there is a single one running, error otherwise
-func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) {
+// GetPod returns the operator pod if there is a single one running, error otherwise
+func GetPod(ctx context.Context, crudClient client.Client) (corev1.Pod, error) {
podList := &corev1.PodList{}
// This will work for newer version of the operator, which are using
// our custom label
- if err := GetObjectList(
- &env, podList, ctrlclient.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}); err != nil {
+ if err := objects.List(
+ ctx, crudClient,
+ podList, client.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}); err != nil {
return corev1.Pod{}, err
}
activePods := utils.FilterActivePods(podList.Items)
@@ -156,17 +169,17 @@ func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) {
return activePods[0], nil
}
- operatorNamespace, err := env.GetOperatorNamespaceName()
+ operatorNamespace, err := NamespaceName(ctx, crudClient)
if err != nil {
return corev1.Pod{}, err
}
// This will work for older version of the operator, which are using
// the default label from kube-builder
- if err := GetObjectList(
- &env, podList,
- ctrlclient.MatchingLabels{"control-plane": "controller-manager"},
- ctrlclient.InNamespace(operatorNamespace)); err != nil {
+ if err := objects.List(
+ ctx, crudClient, podList,
+ client.MatchingLabels{"control-plane": "controller-manager"},
+ client.InNamespace(operatorNamespace)); err != nil {
return corev1.Pod{}, err
}
activePods = utils.FilterActivePods(podList.Items)
@@ -178,18 +191,22 @@ func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) {
return podList.Items[0], nil
}
-// GetOperatorNamespaceName returns the namespace the operator Deployment is running in
-func (env TestingEnvironment) GetOperatorNamespaceName() (string, error) {
- deployment, err := env.GetOperatorDeployment()
+// NamespaceName returns the namespace the operator Deployment is running in
+func NamespaceName(ctx context.Context, crudClient client.Client) (string, error) {
+ deployment, err := GetDeployment(ctx, crudClient)
if err != nil {
return "", err
}
return deployment.GetNamespace(), err
}
-// IsOperatorReady ensures that the operator will be ready.
-func (env TestingEnvironment) IsOperatorReady() (bool, error) {
- pod, err := env.GetOperatorPod()
+// IsReady checks whether the operator pod is ready and its webhook setup is complete.
+func IsReady(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+) (bool, error) {
+ pod, err := GetPod(ctx, crudClient)
if err != nil {
return false, err
}
@@ -211,7 +228,7 @@ func (env TestingEnvironment) IsOperatorReady() (bool, error) {
// If the operator is managing certificates for webhooks, check that the setup is completed
if !webhookManagedByOLM {
- err = CheckWebhookReady(&env, namespace)
+ err = checkWebhookReady(ctx, crudClient, kubeInterface, namespace)
if err != nil {
return false, err
}
@@ -230,7 +247,12 @@ func (env TestingEnvironment) IsOperatorReady() (bool, error) {
},
},
}
- _, err = CreateObject(&env, testCluster, &ctrlclient.CreateOptions{DryRun: []string{metav1.DryRunAll}})
+ _, err = objects.Create(
+ ctx,
+ crudClient,
+ testCluster,
+ &client.CreateOptions{DryRun: []string{metav1.DryRunAll}},
+ )
if err != nil {
return false, err
}
@@ -238,11 +260,11 @@ func (env TestingEnvironment) IsOperatorReady() (bool, error) {
return true, err
}
-// IsOperatorDeploymentReady returns true if the operator deployment has the expected number
+// IsDeploymentReady returns true if the operator deployment has the expected number
// of ready pods.
// It returns an error if there was a problem getting the operator deployment
-func (env *TestingEnvironment) IsOperatorDeploymentReady() (bool, error) {
- operatorDeployment, err := env.GetOperatorDeployment()
+func IsDeploymentReady(ctx context.Context, crudClient client.Client) (bool, error) {
+ operatorDeployment, err := GetDeployment(ctx, crudClient)
if err != nil {
return false, err
}
@@ -257,8 +279,8 @@ func (env *TestingEnvironment) IsOperatorDeploymentReady() (bool, error) {
}
// ScaleOperatorDeployment will scale the operator to n replicas and return error in case of failure
-func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error {
- operatorDeployment, err := env.GetOperatorDeployment()
+func ScaleOperatorDeployment(ctx context.Context, crudClient client.Client, replicas int32) error {
+ operatorDeployment, err := GetDeployment(ctx, crudClient)
if err != nil {
return err
}
@@ -267,14 +289,14 @@ func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error {
updatedOperatorDeployment.Spec.Replicas = ptr.To(replicas)
// Scale down operator deployment to zero replicas
- err = env.Client.Patch(env.Ctx, &updatedOperatorDeployment, ctrlclient.MergeFrom(&operatorDeployment))
+ err = crudClient.Patch(ctx, &updatedOperatorDeployment, client.MergeFrom(&operatorDeployment))
if err != nil {
return err
}
return retry.Do(
func() error {
- _, err := env.IsOperatorDeploymentReady()
+ _, err := IsDeploymentReady(ctx, crudClient)
return err
},
retry.Delay(time.Second),
@@ -282,13 +304,13 @@ func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error {
)
}
-// OperatorPodRenamed checks if the operator pod was renamed
-func OperatorPodRenamed(operatorPod corev1.Pod, expectedOperatorPodName string) bool {
+// PodRenamed checks if the operator pod was renamed
+func PodRenamed(operatorPod corev1.Pod, expectedOperatorPodName string) bool {
return operatorPod.GetName() != expectedOperatorPodName
}
-// OperatorPodRestarted checks if the operator pod was restarted
-func OperatorPodRestarted(operatorPod corev1.Pod) bool {
+// PodRestarted checks if the operator pod was restarted
+func PodRestarted(operatorPod corev1.Pod) bool {
restartCount := 0
for _, containerStatus := range operatorPod.Status.ContainerStatuses {
if containerStatus.Name == "manager" {
@@ -298,10 +320,10 @@ func OperatorPodRestarted(operatorPod corev1.Pod) bool {
return restartCount != 0
}
-// GetOperatorPodName returns the name of the current operator pod
+// GetPodName returns the name of the current operator pod
// NOTE: will return an error if the pod is being deleted
-func GetOperatorPodName(env *TestingEnvironment) (string, error) {
- pod, err := env.GetOperatorPod()
+func GetPodName(ctx context.Context, crudClient client.Client) (string, error) {
+ pod, err := GetPod(ctx, crudClient)
if err != nil {
return "", err
}
@@ -312,16 +334,16 @@ func GetOperatorPodName(env *TestingEnvironment) (string, error) {
return pod.GetName(), nil
}
-// HasOperatorBeenUpgraded determines if the operator has been upgraded by checking
+// HasBeenUpgraded determines if the operator has been upgraded by checking
// if there is a deletion timestamp. If there isn't, it returns true
-func HasOperatorBeenUpgraded(env *TestingEnvironment) bool {
- _, err := GetOperatorPodName(env)
+func HasBeenUpgraded(ctx context.Context, crudClient client.Client) bool {
+ _, err := GetPodName(ctx, crudClient)
return err == nil
}
-// GetOperatorVersion returns the current operator version
-func GetOperatorVersion(namespace, podName string) (string, error) {
- out, _, err := RunUnchecked(fmt.Sprintf(
+// Version returns the current operator version
+func Version(namespace, podName string) (string, error) {
+ out, _, err := run.Unchecked(fmt.Sprintf(
"kubectl -n %v exec %v -c manager -- /manager version",
namespace,
podName,
@@ -334,9 +356,9 @@ func GetOperatorVersion(namespace, podName string) (string, error) {
return ver, nil
}
-// GetOperatorArchitectures returns all the supported operator architectures
-func GetOperatorArchitectures(operatorPod *corev1.Pod) ([]string, error) {
- out, _, err := RunUnchecked(fmt.Sprintf(
+// Architectures returns all the supported operator architectures
+func Architectures(operatorPod *corev1.Pod) ([]string, error) {
+ out, _, err := run.Unchecked(fmt.Sprintf(
"kubectl -n %v exec %v -c manager -- /manager debug show-architectures",
operatorPod.Namespace,
operatorPod.Name,
@@ -354,3 +376,17 @@ func GetOperatorArchitectures(operatorPod *corev1.Pod) ([]string, error) {
return res, err
}
+
+// GetLeaderInfoFromLease gathers the leader's holderIdentity from the lease
+func GetLeaderInfoFromLease(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ operatorNamespace string,
+) (string, error) {
+ leaseInterface := kubeInterface.CoordinationV1().Leases(operatorNamespace)
+ lease, err := leaseInterface.Get(ctx, controller.LeaderElectionID, metav1.GetOptions{})
+ if err != nil {
+ return "", err
+ }
+ return *lease.Spec.HolderIdentity, nil
+}
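
Since this file turns TestingEnvironment methods into package-level functions, a minimal sketch of a migrated call site may help; the wrapper name is hypothetical and the dependencies are assumed to come from the suite's setup code:

package e2e

import (
	"context"

	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
)

// reloadOperator is a hypothetical helper: the former
// env.ReloadOperatorDeployment(timeout) method call becomes a package
// function receiving the context and clients explicitly.
func reloadOperator(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
) error {
	// Delete the operator pod and wait up to 120 seconds for the
	// replacement to report ready.
	return operator.ReloadDeployment(ctx, crudClient, kubeInterface, 120)
}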
diff --git a/tests/utils/release.go b/tests/utils/operator/release.go
similarity index 97%
rename from tests/utils/release.go
rename to tests/utils/operator/release.go
index 6b480e1957..af372f0ffb 100644
--- a/tests/utils/release.go
+++ b/tests/utils/operator/release.go
@@ -14,8 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package utils contains helper functions/methods for e2e
-package utils
+package operator
import (
"errors"
diff --git a/tests/utils/release_test.go b/tests/utils/operator/release_test.go
similarity index 95%
rename from tests/utils/release_test.go
rename to tests/utils/operator/release_test.go
index a65ea5b58f..611141572e 100644
--- a/tests/utils/release_test.go
+++ b/tests/utils/operator/release_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package operator
import (
"os"
@@ -27,6 +27,8 @@ import (
. "github.com/onsi/gomega"
)
+const releaseDirectoryPath = "../../../releases"
+
var _ = Describe("Release tag extraction", func() {
It("properly works with expected filename", func() {
tag, err := extractTag("cnpg-0.5.0.yaml")
@@ -42,7 +44,7 @@ var _ = Describe("Release tag extraction", func() {
var _ = Describe("Most recent tag", func() {
It("properly works with release branch", func() {
- releasesDir, err := filepath.Abs("../../releases")
+ releasesDir, err := filepath.Abs(releaseDirectoryPath)
Expect(err).ToNot(HaveOccurred())
versionList, err := GetAvailableReleases(releasesDir)
@@ -60,7 +62,7 @@ var _ = Describe("Most recent tag", func() {
})
It("properly works with dev branch", func() {
- releasesDir, err := filepath.Abs("../../releases")
+ releasesDir, err := filepath.Abs(releaseDirectoryPath)
Expect(err).ToNot(HaveOccurred())
GinkgoT().Setenv("BRANCH_NAME", "dev/"+versions.Version)
diff --git a/tests/utils/monitoring.go b/tests/utils/operator/suite_test.go
similarity index 51%
rename from tests/utils/monitoring.go
rename to tests/utils/operator/suite_test.go
index 2a7c12b3b4..b49f44d833 100644
--- a/tests/utils/monitoring.go
+++ b/tests/utils/operator/suite_test.go
@@ -14,24 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package operator
import (
- monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
- "k8s.io/apimachinery/pkg/types"
-)
+ "testing"
-// GetPodMonitor gathers the current PodMonitor in a namespace
-func (env TestingEnvironment) GetPodMonitor(namespace string, name string) (*monitoringv1.PodMonitor, error) {
- podMonitor := &monitoringv1.PodMonitor{}
- namespacedName := types.NamespacedName{
- Namespace: namespace,
- Name: name,
- }
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
- err := GetObject(&env, namespacedName, podMonitor)
- if err != nil {
- return nil, err
- }
- return podMonitor, nil
+func TestUtils(t *testing.T) {
+ RegisterFailHandler(Fail)
+	RunSpecs(t, "Utils test operator suite")
}
diff --git a/tests/utils/upgrade.go b/tests/utils/operator/upgrade.go
similarity index 70%
rename from tests/utils/upgrade.go
rename to tests/utils/operator/upgrade.go
index c8fa832ffd..e8a2e7af21 100644
--- a/tests/utils/upgrade.go
+++ b/tests/utils/operator/upgrade.go
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package operator provides functions to handle the operator install/uninstall process
+package operator
import (
+ "context"
"fmt"
corev1 "k8s.io/api/core/v1"
@@ -24,25 +26,34 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2" // nolint
. "github.com/onsi/gomega" // nolint
)
-// CreateOperatorConfigurationMap creates the operator namespace and enables/disable the online upgrade for
+// CreateConfigMap creates the operator namespace and enables/disables the online upgrade for
// the instance manager
-func CreateOperatorConfigurationMap(pgOperatorNamespace, configName string, isOnline bool, env *TestingEnvironment) {
+func CreateConfigMap(
+ ctx context.Context,
+ crudClient client.Client,
+ pgOperatorNamespace, configName string,
+ isOnline bool,
+) {
By("creating operator namespace", func() {
// Create a upgradeNamespace for all the resources
namespacedName := types.NamespacedName{
Name: pgOperatorNamespace,
}
namespaceResource := &corev1.Namespace{}
- err := env.Client.Get(env.Ctx, namespacedName, namespaceResource)
+ err := crudClient.Get(ctx, namespacedName, namespaceResource)
if apierrors.IsNotFound(err) {
- err = env.CreateNamespace(pgOperatorNamespace)
+ err = namespaces.CreateNamespace(ctx, crudClient, pgOperatorNamespace)
Expect(err).ToNot(HaveOccurred())
} else if err != nil {
Expect(err).ToNot(HaveOccurred())
@@ -61,19 +72,22 @@ func CreateOperatorConfigurationMap(pgOperatorNamespace, configName string, isOn
},
Data: map[string]string{"ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES": enable},
}
- _, err := CreateObject(env, configMap)
+ _, err := objects.Create(ctx, crudClient, configMap)
Expect(err).NotTo(HaveOccurred())
})
}
-// InstallLatestCNPGOperator installs an operator version with the most recent release tag
-func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) {
+// InstallLatest installs an operator version with the most recent release tag
+func InstallLatest(
+ crudClient client.Client,
+ releaseTag string,
+) {
mostRecentReleasePath := "../../releases/cnpg-" + releaseTag + ".yaml"
Eventually(func() error {
GinkgoWriter.Printf("installing: %s\n", mostRecentReleasePath)
- _, stderr, err := RunUnchecked("kubectl apply --server-side --force-conflicts -f " + mostRecentReleasePath)
+ _, stderr, err := run.Unchecked("kubectl apply --server-side --force-conflicts -f " + mostRecentReleasePath)
if err != nil {
GinkgoWriter.Printf("stderr: %s\n", stderr)
}
@@ -82,14 +96,14 @@ func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) {
}, 60).ShouldNot(HaveOccurred())
Eventually(func() error {
- _, _, err := RunUnchecked(
+ _, _, err := run.Unchecked(
"kubectl wait --for condition=established --timeout=60s " +
"crd/clusters.postgresql.cnpg.io")
return err
}, 150).ShouldNot(HaveOccurred())
Eventually(func() error {
- mapping, err := env.Client.RESTMapper().RESTMapping(
+ mapping, err := crudClient.RESTMapper().RESTMapping(
schema.GroupKind{Group: apiv1.GroupVersion.Group, Kind: apiv1.ClusterKind},
apiv1.GroupVersion.Version)
if err != nil {
@@ -102,7 +116,7 @@ func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) {
}, 150).ShouldNot(HaveOccurred())
Eventually(func() error {
- _, _, err := RunUnchecked(
+ _, _, err := run.Unchecked(
"kubectl wait --for=condition=Available --timeout=2m -n cnpg-system " +
"deployments cnpg-controller-manager")
return err
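
A hedged sketch of how the renamed upgrade helpers compose; note that CreateConfigMap asserts with Gomega internally, so this fragment is assumed to run inside a Ginkgo node, and the namespace and ConfigMap names are illustrative:

package e2e

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
)

// installLatestOperator is a hypothetical fragment: prepare the operator
// namespace with in-place instance-manager updates enabled, then apply the
// most recent released manifests identified by releaseTag.
func installLatestOperator(ctx context.Context, crudClient client.Client, releaseTag string) {
	operator.CreateConfigMap(ctx, crudClient, "cnpg-system", "cnpg-controller-manager-config", true)
	operator.InstallLatest(crudClient, releaseTag)
}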
diff --git a/tests/utils/webhooks.go b/tests/utils/operator/webhooks.go
similarity index 65%
rename from tests/utils/webhooks.go
rename to tests/utils/operator/webhooks.go
index 210e541aa2..b4d94462a5 100644
--- a/tests/utils/webhooks.go
+++ b/tests/utils/operator/webhooks.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package operator
import (
"bytes"
@@ -25,17 +25,24 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
)
-// GetCNPGsMutatingWebhookByName get the MutatingWebhook filtered by the name of one
+// GetMutatingWebhookByName gets the MutatingWebhook filtered by the name of one
// of the webhooks
-func GetCNPGsMutatingWebhookByName(env *TestingEnvironment, name string) (
+func GetMutatingWebhookByName(
+ ctx context.Context,
+ crudClient client.Client,
+ name string,
+) (
*admissionregistrationv1.MutatingWebhookConfiguration, int, error,
) {
var mWebhooks admissionregistrationv1.MutatingWebhookConfigurationList
- err := GetObjectList(env, &mWebhooks)
+ err := objects.List(ctx, crudClient, &mWebhooks)
if err != nil {
return nil, 0, err
}
@@ -50,12 +57,13 @@ func GetCNPGsMutatingWebhookByName(env *TestingEnvironment, name string) (
return nil, 0, fmt.Errorf("mutating webhook not found")
}
-// UpdateCNPGsMutatingWebhookConf update MutatingWebhookConfiguration object
-func UpdateCNPGsMutatingWebhookConf(env *TestingEnvironment,
+// UpdateMutatingWebhookConf updates the MutatingWebhookConfiguration object
+func UpdateMutatingWebhookConf(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
wh *admissionregistrationv1.MutatingWebhookConfiguration,
) error {
- ctx := context.Background()
- _, err := env.Interface.AdmissionregistrationV1().
+ _, err := kubeInterface.AdmissionregistrationV1().
MutatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{})
if err != nil {
return err
@@ -63,12 +71,12 @@ func UpdateCNPGsMutatingWebhookConf(env *TestingEnvironment,
return nil
}
-// GetCNPGsValidatingWebhookConf get the ValidatingWebhook linked to the operator
-func GetCNPGsValidatingWebhookConf(env *TestingEnvironment) (
+// getCNPGsValidatingWebhookConf gets the ValidatingWebhook linked to the operator
+func getCNPGsValidatingWebhookConf(kubeInterface kubernetes.Interface) (
*admissionregistrationv1.ValidatingWebhookConfiguration, error,
) {
ctx := context.Background()
- validatingWebhookConfig, err := env.Interface.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(
+ validatingWebhookConfig, err := kubeInterface.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(
ctx, controller.ValidatingWebhookConfigurationName, metav1.GetOptions{})
if err != nil {
return nil, err
@@ -76,13 +84,17 @@ func GetCNPGsValidatingWebhookConf(env *TestingEnvironment) (
return validatingWebhookConfig, nil
}
-// GetCNPGsValidatingWebhookByName get ValidatingWebhook by the name of one
+// GetValidatingWebhookByName gets the ValidatingWebhook by the name of one
// of the webhooks
-func GetCNPGsValidatingWebhookByName(env *TestingEnvironment, name string) (
+func GetValidatingWebhookByName(
+ ctx context.Context,
+ crudClient client.Client,
+ name string,
+) (
*admissionregistrationv1.ValidatingWebhookConfiguration, int, error,
) {
var vWebhooks admissionregistrationv1.ValidatingWebhookConfigurationList
- err := GetObjectList(env, &vWebhooks)
+ err := objects.List(ctx, crudClient, &vWebhooks)
if err != nil {
return nil, 0, err
}
@@ -97,12 +109,13 @@ func GetCNPGsValidatingWebhookByName(env *TestingEnvironment, name string) (
return nil, 0, fmt.Errorf("validating webhook not found")
}
-// UpdateCNPGsValidatingWebhookConf update the ValidatingWebhook object
-func UpdateCNPGsValidatingWebhookConf(env *TestingEnvironment,
+// UpdateValidatingWebhookConf updates the ValidatingWebhook object
+func UpdateValidatingWebhookConf(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
wh *admissionregistrationv1.ValidatingWebhookConfiguration,
) error {
- ctx := context.Background()
- _, err := env.Interface.AdmissionregistrationV1().
+ _, err := kubeInterface.AdmissionregistrationV1().
ValidatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{})
if err != nil {
return err
@@ -110,22 +123,27 @@ func UpdateCNPGsValidatingWebhookConf(env *TestingEnvironment,
return nil
}
-// CheckWebhookReady ensures that the operator has finished the webhook setup.
-func CheckWebhookReady(env *TestingEnvironment, namespace string) error {
+// checkWebhookReady ensures that the operator has finished the webhook setup.
+func checkWebhookReady(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ namespace string,
+) error {
// Check CA
secret := &corev1.Secret{}
secretNamespacedName := types.NamespacedName{
Namespace: namespace,
Name: controller.WebhookSecretName,
}
- err := GetObject(env, secretNamespacedName, secret)
+ err := objects.Get(ctx, crudClient, secretNamespacedName, secret)
if err != nil {
return err
}
ca := secret.Data["tls.crt"]
- mutatingWebhookConfig, err := env.GetCNPGsMutatingWebhookConf()
+ mutatingWebhookConfig, err := getCNPGsMutatingWebhookConf(ctx, kubeInterface)
if err != nil {
return err
}
@@ -137,7 +155,7 @@ func CheckWebhookReady(env *TestingEnvironment, namespace string) error {
}
}
- validatingWebhookConfig, err := GetCNPGsValidatingWebhookConf(env)
+ validatingWebhookConfig, err := getCNPGsValidatingWebhookConf(kubeInterface)
if err != nil {
return err
}
@@ -152,12 +170,14 @@ func CheckWebhookReady(env *TestingEnvironment, namespace string) error {
return nil
}
-// GetCNPGsMutatingWebhookConf get the MutatingWebhook linked to the operator
-func (env TestingEnvironment) GetCNPGsMutatingWebhookConf() (
+// getCNPGsMutatingWebhookConf gets the MutatingWebhook linked to the operator
+func getCNPGsMutatingWebhookConf(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+) (
*admissionregistrationv1.MutatingWebhookConfiguration, error,
) {
- ctx := context.Background()
- return env.Interface.AdmissionregistrationV1().
+ return kubeInterface.AdmissionregistrationV1().
MutatingWebhookConfigurations().
Get(ctx, controller.MutatingWebhookConfigurationName, metav1.GetOptions{})
}
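
To make the new webhook signatures concrete, a minimal sketch of flipping a webhook's failure policy and writing it back; the function name and policy choice are illustrative:

package e2e

import (
	"context"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
)

// ignoreMutatingWebhookFailures is a hypothetical helper:
// GetMutatingWebhookByName returns the whole configuration plus the index
// of the webhook matching the given name, so the caller can patch just
// that entry before updating the configuration.
func ignoreMutatingWebhookFailures(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	webhookName string,
) error {
	conf, idx, err := operator.GetMutatingWebhookByName(ctx, crudClient, webhookName)
	if err != nil {
		return err
	}
	policy := admissionregistrationv1.Ignore
	conf.Webhooks[idx].FailurePolicy = &policy
	return operator.UpdateMutatingWebhookConf(ctx, kubeInterface, conf)
}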
diff --git a/tests/utils/pod.go b/tests/utils/pod.go
deleted file mode 100644
index e439d0e00f..0000000000
--- a/tests/utils/pod.go
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "regexp"
- "strings"
- "time"
-
- "github.com/avast/retry-go/v4"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
- pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
-
- . "github.com/onsi/gomega" // nolint
-)
-
-// PodCreateAndWaitForReady creates a given pod object and wait for it to be ready
-func PodCreateAndWaitForReady(env *TestingEnvironment, pod *corev1.Pod, timeoutSeconds uint) error {
- _, err := CreateObject(env, pod)
- if err != nil {
- return err
- }
- return PodWaitForReady(env, pod, timeoutSeconds)
-}
-
-// PodWaitForReady waits for a pod to be ready
-func PodWaitForReady(env *TestingEnvironment, pod *corev1.Pod, timeoutSeconds uint) error {
- err := retry.Do(
- func() error {
- if err := env.Client.Get(env.Ctx, client.ObjectKey{
- Namespace: pod.Namespace,
- Name: pod.Name,
- }, pod); err != nil {
- return err
- }
- if !pkgutils.IsPodReady(*pod) {
- return fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name)
- }
- return nil
- },
- retry.Attempts(timeoutSeconds),
- retry.Delay(time.Second),
- retry.DelayType(retry.FixedDelay),
- )
- return err
-}
-
-// PodHasLabels verifies that the labels of a pod contain a specified
-// labels map
-func PodHasLabels(pod corev1.Pod, labels map[string]string) bool {
- podLabels := pod.Labels
- for k, v := range labels {
- val, ok := podLabels[k]
- if !ok || (v != val) {
- return false
- }
- }
- return true
-}
-
-// PodHasAnnotations verifies that the annotations of a pod contain a specified
-// annotations map
-func PodHasAnnotations(pod corev1.Pod, annotations map[string]string) bool {
- podAnnotations := pod.Annotations
- for k, v := range annotations {
- val, ok := podAnnotations[k]
- if !ok || (v != val) {
- return false
- }
- }
- return true
-}
-
-// PodHasCondition verifies that a pod has a specified condition
-func PodHasCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, status corev1.ConditionStatus) bool {
- for _, cond := range pod.Status.Conditions {
- if cond.Type == conditionType && cond.Status == status {
- return true
- }
- }
- return false
-}
-
-// DeletePod deletes a pod if existent
-func (env TestingEnvironment) DeletePod(namespace string, name string, opts ...client.DeleteOption) error {
- u := &unstructured.Unstructured{}
- u.SetName(name)
- u.SetNamespace(namespace)
- u.SetGroupVersionKind(schema.GroupVersionKind{
- Group: "",
- Version: "v1",
- Kind: "Pod",
- })
-
- return DeleteObject(&env, u, opts...)
-}
-
-// GetPodLogs gathers pod logs
-func (env TestingEnvironment) GetPodLogs(namespace string, podName string) (string, error) {
- req := env.Interface.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{})
- podLogs, err := req.Stream(env.Ctx)
- if err != nil {
- return "", err
- }
- defer func() {
- innerErr := podLogs.Close()
- if err == nil && innerErr != nil {
- err = innerErr
- }
- }()
-
- // Create a buffer to hold JSON data
- buf := new(bytes.Buffer)
- _, err = io.Copy(buf, podLogs)
- if err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-// GetPodList gathers the current list of pods in a namespace
-func (env TestingEnvironment) GetPodList(namespace string) (*corev1.PodList, error) {
- podList := &corev1.PodList{}
- err := GetObjectList(
- &env, podList, client.InNamespace(namespace),
- )
- return podList, err
-}
-
-// GetManagerVersion returns the current manager version of a given pod
-func GetManagerVersion(namespace, podName string) (string, error) {
- out, _, err := RunUnchecked(fmt.Sprintf(
- "kubectl -n %v exec %v -c postgres -- /controller/manager version",
- namespace,
- podName,
- ))
- if err != nil {
- return "", err
- }
- versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`)
- ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1]
- return ver, nil
-}
-
-// GetPod gets a pod by namespace and name
-func (env TestingEnvironment) GetPod(namespace, podName string) (*corev1.Pod, error) {
- wrapErr := func(err error) error {
- return fmt.Errorf("while getting pod '%s/%s': %w", namespace, podName, err)
- }
- podList, err := env.GetPodList(namespace)
- if err != nil {
- return nil, wrapErr(err)
- }
- for _, pod := range podList.Items {
- if podName == pod.Name {
- return &pod, nil
- }
- }
- return nil, wrapErr(errors.New("pod not found"))
-}
-
-// ContainerLocator contains the necessary data to find a container on a pod
-type ContainerLocator struct {
- Namespace string
- PodName string
- ContainerName string
-}
-
-// ExecCommandInContainer executes commands in a given instance pod, in the
-// postgres container
-func (env TestingEnvironment) ExecCommandInContainer(
- container ContainerLocator,
- timeout *time.Duration,
- command ...string,
-) (string, string, error) {
- wrapErr := func(err error) error {
- return fmt.Errorf("while executing command in pod '%s/%s': %w",
- container.Namespace, container.PodName, err)
- }
- pod, err := env.GetPod(container.Namespace, container.PodName)
- if err != nil {
- return "", "", wrapErr(err)
- }
- if !pkgutils.IsPodReady(*pod) {
- return "", "", fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name)
- }
- return env.ExecCommand(env.Ctx, *pod, container.ContainerName, timeout, command...)
-}
-
-// PodLocator contains the necessary data to find a pod
-type PodLocator struct {
- Namespace string
- PodName string
-}
-
-// ExecCommandInInstancePod executes commands in a given instance pod, in the
-// postgres container
-func (env TestingEnvironment) ExecCommandInInstancePod(
- podLocator PodLocator,
- timeout *time.Duration,
- command ...string,
-) (string, string, error) {
- return env.ExecCommandInContainer(
- ContainerLocator{
- Namespace: podLocator.Namespace,
- PodName: podLocator.PodName,
- ContainerName: specs.PostgresContainerName,
- }, timeout, command...)
-}
-
-// DatabaseName is a special type for the database argument in an Exec call
-type DatabaseName string
-
-// ExecQueryInInstancePod executes a query in an instance pod, by connecting to the pod
-// and the postgres container, and using a local connection with the postgres user
-func (env TestingEnvironment) ExecQueryInInstancePod(
- podLocator PodLocator,
- dbname DatabaseName,
- query string,
-) (string, string, error) {
- timeout := time.Second * 10
- return env.ExecCommandInInstancePod(
- PodLocator{
- Namespace: podLocator.Namespace,
- PodName: podLocator.PodName,
- }, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query)
-}
-
-// EventuallyExecQueryInInstancePod wraps ExecQueryInInstancePod with an Eventually clause
-func (env TestingEnvironment) EventuallyExecQueryInInstancePod(
- podLocator PodLocator,
- dbname DatabaseName,
- query string,
- retryTimeout int,
- pollingTime int,
-) (string, string, error) {
- var stdOut, stdErr string
- var err error
-
- Eventually(func() error {
- stdOut, stdErr, err = env.ExecQueryInInstancePod(
- PodLocator{
- Namespace: podLocator.Namespace,
- PodName: podLocator.PodName,
- }, dbname, query)
- return err
- }, retryTimeout, pollingTime).Should(Succeed())
-
- return stdOut, stdErr, err
-}
diff --git a/tests/utils/pods/pod.go b/tests/utils/pods/pod.go
new file mode 100644
index 0000000000..1db187e89d
--- /dev/null
+++ b/tests/utils/pods/pod.go
@@ -0,0 +1,194 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package pods provides utilities to manage pods inside K8s
+package pods
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/avast/retry-go/v4"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+)
+
+// List gathers the current list of pods in a namespace
+func List(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*v1.PodList, error) {
+ podList := &v1.PodList{}
+ err := objects.List(
+ ctx, crudClient, podList, client.InNamespace(namespace),
+ )
+ return podList, err
+}
+
+// Delete deletes a pod if it exists
+func Delete(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, name string,
+ opts ...client.DeleteOption,
+) error {
+ u := &unstructured.Unstructured{}
+ u.SetName(name)
+ u.SetNamespace(namespace)
+ u.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: "",
+ Version: "v1",
+ Kind: "Pod",
+ })
+
+ return objects.Delete(ctx, crudClient, u, opts...)
+}
+
+// CreateAndWaitForReady creates a given pod object and waits for it to be ready
+func CreateAndWaitForReady(
+ ctx context.Context,
+ crudClient client.Client,
+ pod *v1.Pod,
+ timeoutSeconds uint,
+) error {
+ _, err := objects.Create(ctx, crudClient, pod)
+ if err != nil {
+ return err
+ }
+ return waitForReady(ctx, crudClient, pod, timeoutSeconds)
+}
+
+// waitForReady waits for a pod to be ready
+func waitForReady(
+ ctx context.Context,
+ crudClient client.Client,
+ pod *v1.Pod,
+ timeoutSeconds uint,
+) error {
+ err := retry.Do(
+ func() error {
+ if err := crudClient.Get(ctx, client.ObjectKey{
+ Namespace: pod.Namespace,
+ Name: pod.Name,
+ }, pod); err != nil {
+ return err
+ }
+ if !utils.IsPodReady(*pod) {
+ return fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name)
+ }
+ return nil
+ },
+ retry.Attempts(timeoutSeconds),
+ retry.Delay(time.Second),
+ retry.DelayType(retry.FixedDelay),
+ )
+ return err
+}
+
+// Logs gathers pod logs
+func Logs(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ namespace, podName string,
+) (string, error) {
+ req := kubeInterface.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{})
+ podLogs, err := req.Stream(ctx)
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ innerErr := podLogs.Close()
+ if err == nil && innerErr != nil {
+ err = innerErr
+ }
+ }()
+
+	// Create a buffer to hold the log data
+ buf := new(bytes.Buffer)
+ _, err = io.Copy(buf, podLogs)
+ if err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// Get gets a pod by namespace and name
+func Get(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, podName string,
+) (*v1.Pod, error) {
+ wrapErr := func(err error) error {
+ return fmt.Errorf("while getting pod '%s/%s': %w", namespace, podName, err)
+ }
+ podList, err := List(ctx, crudClient, namespace)
+ if err != nil {
+ return nil, wrapErr(err)
+ }
+ for _, pod := range podList.Items {
+ if podName == pod.Name {
+ return &pod, nil
+ }
+ }
+ return nil, wrapErr(errors.New("pod not found"))
+}
+
+// HasLabels verifies that the labels of a pod contain a specified
+// labels map
+func HasLabels(pod v1.Pod, labels map[string]string) bool {
+ podLabels := pod.Labels
+ for k, v := range labels {
+ val, ok := podLabels[k]
+ if !ok || (v != val) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAnnotations verifies that the annotations of a pod contain a specified
+// annotations map
+func HasAnnotations(pod v1.Pod, annotations map[string]string) bool {
+ podAnnotations := pod.Annotations
+ for k, v := range annotations {
+ val, ok := podAnnotations[k]
+ if !ok || (v != val) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasCondition verifies that a pod has a specified condition
+func HasCondition(pod *v1.Pod, conditionType v1.PodConditionType, status v1.ConditionStatus) bool {
+ for _, cond := range pod.Status.Conditions {
+ if cond.Type == conditionType && cond.Status == status {
+ return true
+ }
+ }
+ return false
+}
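
A minimal usage sketch for the relocated pod helpers; the wrapper name is hypothetical and the timeout illustrative:

package e2e

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
)

// createAndDumpLogs creates a pod, waits up to 120 seconds for it to become
// ready, then streams its logs through the clientset.
func createAndDumpLogs(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	pod *corev1.Pod,
) (string, error) {
	if err := pods.CreateAndWaitForReady(ctx, crudClient, pod, 120); err != nil {
		return "", err
	}
	return pods.Logs(ctx, kubeInterface, pod.Namespace, pod.Name)
}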
diff --git a/tests/utils/postgres.go b/tests/utils/postgres.go
deleted file mode 100644
index 9c4011c9f1..0000000000
--- a/tests/utils/postgres.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "strconv"
- "strings"
-
- corev1 "k8s.io/api/core/v1"
-)
-
-const (
- // PGLocalSocketDir is the directory containing the PostgreSQL local socket
- PGLocalSocketDir = "/controller/run"
- // AppUser for app user
- AppUser = "app"
- // PostgresUser for postgres user
- PostgresUser = "postgres"
- // AppDBName database name app
- AppDBName = "app"
- // PostgresDBName database name postgres
- PostgresDBName = "postgres"
- // TablespaceDefaultName is the default tablespace location
- TablespaceDefaultName = "pg_default"
-)
-
-// CountReplicas counts the number of replicas attached to an instance
-func CountReplicas(env *TestingEnvironment, pod *corev1.Pod) (int, error) {
- query := "SELECT count(*) FROM pg_stat_replication"
- stdOut, _, err := env.EventuallyExecQueryInInstancePod(
- PodLocator{
- Namespace: pod.Namespace,
- PodName: pod.Name,
- }, AppDBName,
- query,
- RetryTimeout,
- PollingTime,
- )
- if err != nil {
- return 0, nil
- }
- return strconv.Atoi(strings.Trim(stdOut, "\n"))
-}
diff --git a/tests/utils/postgres/doc.go b/tests/utils/postgres/doc.go
new file mode 100644
index 0000000000..f394238a09
--- /dev/null
+++ b/tests/utils/postgres/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package postgres provides functions to handle PostgreSQL in CNPG clusters
+package postgres
diff --git a/tests/utils/postgres/postgres.go b/tests/utils/postgres/postgres.go
new file mode 100644
index 0000000000..5db3eb088d
--- /dev/null
+++ b/tests/utils/postgres/postgres.go
@@ -0,0 +1,133 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postgres
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/cloudnative-pg/machinery/pkg/image/reference"
+ "github.com/cloudnative-pg/machinery/pkg/postgres/version"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+)
+
+const (
+ // PGLocalSocketDir is the directory containing the PostgreSQL local socket
+ PGLocalSocketDir = "/controller/run"
+ // AppUser for app user
+ AppUser = "app"
+ // PostgresUser for postgres user
+ PostgresUser = "postgres"
+ // AppDBName database name app
+ AppDBName = "app"
+ // PostgresDBName database name postgres
+ PostgresDBName = "postgres"
+ // TablespaceDefaultName is the default tablespace location
+ TablespaceDefaultName = "pg_default"
+)
+
+// CountReplicas counts the number of replicas attached to an instance
+func CountReplicas(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ pod *corev1.Pod,
+ retryTimeout int,
+) (int, error) {
+ query := "SELECT count(*) FROM pg_stat_replication"
+ stdOut, _, err := exec.EventuallyExecQueryInInstancePod(
+ ctx, crudClient, kubeInterface, restConfig,
+ exec.PodLocator{
+ Namespace: pod.Namespace,
+ PodName: pod.Name,
+ }, AppDBName,
+ query,
+ retryTimeout,
+ objects.PollingTime,
+ )
+ if err != nil {
+		return 0, err
+ }
+ return strconv.Atoi(strings.Trim(stdOut, "\n"))
+}
+
+// GetCurrentTimestamp gets the current timestamp from the postgres server
+func GetCurrentTimestamp(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ namespace, clusterName string,
+) (string, error) {
+ row, err := RunQueryRowOverForward(
+ ctx,
+ crudClient,
+ kubeInterface,
+ restConfig,
+ namespace,
+ clusterName,
+ AppDBName,
+ v1.ApplicationUserSecretSuffix,
+ "select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');",
+ )
+ if err != nil {
+ return "", err
+ }
+
+ var currentTimestamp string
+	if err = row.Scan(&currentTimestamp); err != nil {
+ return "", err
+ }
+
+ return currentTimestamp, nil
+}
+
+// BumpPostgresImageMajorVersion returns the given postgres image with its major version incremented, when a newer major is available
+func BumpPostgresImageMajorVersion(postgresImage string) (string, error) {
+ imageReference := reference.New(postgresImage)
+
+ postgresImageVersion, err := version.FromTag(imageReference.Tag)
+ if err != nil {
+ return "", err
+ }
+
+ targetPostgresImageMajorVersionInt := postgresImageVersion.Major() + 1
+
+ defaultImageVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
+ if err != nil {
+ return "", err
+ }
+
+ if targetPostgresImageMajorVersionInt >= defaultImageVersion.Major() {
+ return postgresImage, nil
+ }
+
+ imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion.Major()+1)
+
+ return imageReference.GetNormalizedName(), nil
+}
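
A hedged sketch of the image-bumping helper in isolation; the image name is illustrative, and per the code above the input comes back unchanged once the bump would reach the default image's major version:

package e2e

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

// printBumpedImage is a hypothetical example of BumpPostgresImageMajorVersion.
func printBumpedImage() error {
	bumped, err := postgres.BumpPostgresImageMajorVersion("ghcr.io/cloudnative-pg/postgresql:16.2")
	if err != nil {
		return err
	}
	// The result keeps the repository and carries only the incremented
	// major as its tag, e.g. "ghcr.io/cloudnative-pg/postgresql:17",
	// under the assumption above.
	fmt.Println(bumped)
	return nil
}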
diff --git a/tests/utils/version_test.go b/tests/utils/postgres/postgres_test.go
similarity index 98%
rename from tests/utils/version_test.go
rename to tests/utils/postgres/postgres_test.go
index 64c7ca13e8..bc449cd4e5 100644
--- a/tests/utils/version_test.go
+++ b/tests/utils/postgres/postgres_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package postgres
import (
"bytes"
diff --git a/tests/utils/psql_connection.go b/tests/utils/postgres/psql_connection.go
similarity index 79%
rename from tests/utils/psql_connection.go
rename to tests/utils/postgres/psql_connection.go
index d3a24cc40a..051a9e234e 100644
--- a/tests/utils/psql_connection.go
+++ b/tests/utils/postgres/psql_connection.go
@@ -14,19 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+package postgres
import (
+ "context"
"database/sql"
"io"
"time"
"k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/forwardconnection"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
)
// PSQLForwardConnection manages the creation of a port-forwarding to open a new database connection
@@ -103,38 +109,50 @@ func startForwardConnection(
// ForwardPSQLConnection simplifies the creation of forwarded connection to PostgreSQL cluster
func ForwardPSQLConnection(
- env *TestingEnvironment,
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
namespace,
clusterName,
dbname,
secretSuffix string,
) (*PSQLForwardConnection, *sql.DB, error) {
- user, pass, err := GetCredentials(clusterName, namespace, secretSuffix, env)
+ user, pass, err := secrets.GetCredentials(ctx, crudClient, clusterName, namespace, secretSuffix)
if err != nil {
return nil, nil, err
}
- return ForwardPSQLConnectionWithCreds(env, namespace, clusterName, dbname, user, pass)
+ return ForwardPSQLConnectionWithCreds(
+ ctx,
+ crudClient,
+ kubeInterface,
+ restConfig,
+ namespace, clusterName, dbname, user, pass,
+ )
}
// ForwardPSQLConnectionWithCreds creates a forwarded connection to a PostgreSQL cluster
// using the given credentials
func ForwardPSQLConnectionWithCreds(
- env *TestingEnvironment,
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
namespace,
clusterName,
dbname,
userApp,
passApp string,
) (*PSQLForwardConnection, *sql.DB, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
if err != nil {
return nil, nil, err
}
dialer, err := forwardconnection.NewDialer(
- env.Interface,
- env.RestClientConfig,
+ kubeInterface,
+ restConfig,
namespace,
cluster.Status.CurrentPrimary,
)
@@ -159,7 +177,9 @@ func ForwardPSQLConnectionWithCreds(
// ForwardPSQLServiceConnection creates a forwarded connection to a PostgreSQL service
// using the given credentials
func ForwardPSQLServiceConnection(
- env *TestingEnvironment,
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
namespace,
serviceName,
dbname,
@@ -167,9 +187,9 @@ func ForwardPSQLServiceConnection(
passApp string,
) (*PSQLForwardConnection, *sql.DB, error) {
dialer, portMap, err := forwardconnection.NewDialerFromService(
- env.Ctx,
- env.Interface,
- env.RestClientConfig,
+ ctx,
+ kubeInterface,
+ restConfig,
namespace,
serviceName,
)
@@ -187,7 +207,10 @@ func ForwardPSQLServiceConnection(
// RunQueryRowOverForward runs QueryRow with a given query, returning the Row of the SQL command
func RunQueryRowOverForward(
- env *TestingEnvironment,
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
namespace,
clusterName,
dbname,
@@ -195,7 +218,10 @@ func RunQueryRowOverForward(
query string,
) (*sql.Row, error) {
forward, conn, err := ForwardPSQLConnection(
- env,
+ ctx,
+ crudClient,
+ kubeInterface,
+ restConfig,
namespace,
clusterName,
dbname,
@@ -214,7 +240,10 @@ func RunQueryRowOverForward(
// RunExecOverForward runs Exec with a given query, returning the Result of the SQL command
func RunExecOverForward(
- env *TestingEnvironment,
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
namespace,
clusterName,
dbname,
@@ -222,7 +251,10 @@ func RunExecOverForward(
query string,
) (sql.Result, error) {
forward, conn, err := ForwardPSQLConnection(
- env,
+ ctx,
+ crudClient,
+ kubeInterface,
+ restConfig,
namespace,
clusterName,
dbname,
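
A sketch of a one-row query over the forwarded connection with the new signature; the helper name and query are illustrative:

package e2e

import (
	"context"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

// countBackends runs a single-row query against the cluster primary over a
// temporary port-forward, authenticating with the application user secret.
func countBackends(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	restConfig *rest.Config,
	namespace, clusterName string,
) (int, error) {
	row, err := postgres.RunQueryRowOverForward(
		ctx, crudClient, kubeInterface, restConfig,
		namespace, clusterName, postgres.AppDBName,
		v1.ApplicationUserSecretSuffix,
		"SELECT count(*) FROM pg_stat_activity",
	)
	if err != nil {
		return 0, err
	}
	var n int
	err = row.Scan(&n)
	return n, err
}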
diff --git a/tests/utils/postgres/suite_test.go b/tests/utils/postgres/suite_test.go
new file mode 100644
index 0000000000..70d4a52fcb
--- /dev/null
+++ b/tests/utils/postgres/suite_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postgres
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestUtils(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Utils test postgres suite")
+}
diff --git a/tests/utils/proxy.go b/tests/utils/proxy/proxy.go
similarity index 65%
rename from tests/utils/proxy.go
rename to tests/utils/proxy/proxy.go
index d17b477eb4..f4e0aded9f 100644
--- a/tests/utils/proxy.go
+++ b/tests/utils/proxy/proxy.go
@@ -14,18 +14,28 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package proxy provides functions to use the proxy subresource to call a pod
+package proxy
import (
+ "context"
"strconv"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/kubernetes"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
)
// runProxyRequest makes a GET call on the pod interface proxy, and returns the raw response
-func runProxyRequest(env *TestingEnvironment, pod *corev1.Pod, tlsEnabled bool, path string, port int) ([]byte, error) {
+func runProxyRequest(
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
+ pod *corev1.Pod,
+ tlsEnabled bool,
+ path string,
+ port int,
+) ([]byte, error) {
portString := strconv.Itoa(port)
schema := "http"
@@ -33,40 +43,43 @@ func runProxyRequest(env *TestingEnvironment, pod *corev1.Pod, tlsEnabled bool,
schema = "https"
}
- req := env.Interface.CoreV1().Pods(pod.Namespace).ProxyGet(
+ req := kubeInterface.CoreV1().Pods(pod.Namespace).ProxyGet(
schema, pod.Name, portString, path, map[string]string{})
- return req.DoRaw(env.Ctx)
+ return req.DoRaw(ctx)
}
// RetrieveMetricsFromInstance aims to retrieve the metrics from a PostgreSQL instance pod
// using a GET request on the pod interface proxy
func RetrieveMetricsFromInstance(
- env *TestingEnvironment,
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
pod corev1.Pod,
tlsEnabled bool,
) (string, error) {
- body, err := runProxyRequest(env, &pod, tlsEnabled, url.PathMetrics, int(url.PostgresMetricsPort))
+ body, err := runProxyRequest(ctx, kubeInterface, &pod, tlsEnabled, url.PathMetrics, int(url.PostgresMetricsPort))
return string(body), err
}
// RetrieveMetricsFromPgBouncer aims to retrieve the metrics from a PgBouncer pod
// using a GET request on the pod interface proxy
func RetrieveMetricsFromPgBouncer(
- env *TestingEnvironment,
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
pod corev1.Pod,
) (string, error) {
- body, err := runProxyRequest(env, &pod, false, url.PathMetrics, int(url.PgBouncerMetricsPort))
+ body, err := runProxyRequest(ctx, kubeInterface, &pod, false, url.PathMetrics, int(url.PgBouncerMetricsPort))
return string(body), err
}
// RetrievePgStatusFromInstance aims to retrieve the pgStatus from a PostgreSQL instance pod
// using a GET request on the pod interface proxy
func RetrievePgStatusFromInstance(
- env *TestingEnvironment,
+ ctx context.Context,
+ kubeInterface kubernetes.Interface,
pod corev1.Pod,
tlsEnabled bool,
) (string, error) {
- body, err := runProxyRequest(env, &pod, tlsEnabled, url.PathPgStatus, int(url.StatusPort))
+ body, err := runProxyRequest(ctx, kubeInterface, &pod, tlsEnabled, url.PathPgStatus, int(url.StatusPort))
return string(body), err
}
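
For completeness, the migrated proxy helpers are called like this; a minimal sketch, assuming the caller knows whether the cluster serves metrics over TLS:

package e2e

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
)

// scrapeInstanceMetrics fetches the Prometheus text exposition of an
// instance pod through the pod proxy subresource.
func scrapeInstanceMetrics(
	ctx context.Context,
	kubeInterface kubernetes.Interface,
	pod corev1.Pod,
	tlsEnabled bool,
) (string, error) {
	return proxy.RetrieveMetricsFromInstance(ctx, kubeInterface, pod, tlsEnabled)
}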
diff --git a/tests/utils/replication_slots.go b/tests/utils/replicationslot/replication_slots.go
similarity index 69%
rename from tests/utils/replication_slots.go
rename to tests/utils/replicationslot/replication_slots.go
index dab55b9e9e..6268e27eb9 100644
--- a/tests/utils/replication_slots.go
+++ b/tests/utils/replicationslot/replication_slots.go
@@ -14,36 +14,48 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package replicationslot provides functions to manage the replication slots of a
+// CNPG cluster
+package replicationslot
import (
+ "context"
"fmt"
"sort"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
"k8s.io/utils/ptr"
- ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
)
// PrintReplicationSlots prints replications slots with their restart_lsn
func PrintReplicationSlots(
- namespace,
- clusterName string,
- env *TestingEnvironment,
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ namespace, clusterName, dbName string,
) string {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName)
if err != nil {
return fmt.Sprintf("Couldn't retrieve the cluster's podlist: %v\n", err)
}
var output strings.Builder
for i, pod := range podList.Items {
- slots, err := GetReplicationSlotsOnPod(namespace, pod.GetName(), env)
+ slots, err := GetReplicationSlotsOnPod(
+ ctx, crudClient, kubeInterface, restConfig,
+ namespace, pod.GetName(), dbName,
+ )
if err != nil {
return fmt.Sprintf("Couldn't retrieve slots for pod %v: %v\n", pod.GetName(), err)
}
@@ -55,12 +67,13 @@ func PrintReplicationSlots(
m := make(map[string]string)
for _, slot := range slots {
query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot)
- restartLsn, _, err := env.ExecQueryInInstancePod(
- PodLocator{
+ restartLsn, _, err := exec.QueryInInstancePod(
+ ctx, crudClient, kubeInterface, restConfig,
+ exec.PodLocator{
Namespace: podList.Items[i].Namespace,
PodName: podList.Items[i].Name,
},
- AppDBName,
+ exec.DatabaseName(dbName),
query)
if err != nil {
output.WriteString(fmt.Sprintf("Couldn't retrieve restart_lsn for slot %v: %v\n", slot, err))
@@ -89,15 +102,16 @@ func AreSameLsn(lsnList []string) bool {
// GetExpectedHAReplicationSlotsOnPod returns a slice of replication slot names which should be present
// in a given pod
func GetExpectedHAReplicationSlotsOnPod(
+ ctx context.Context,
+ crudClient client.Client,
namespace, clusterName, podName string,
- env *TestingEnvironment,
) ([]string, error) {
- podList, err := env.GetClusterPodList(namespace, clusterName)
+ podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName)
if err != nil {
return nil, err
}
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
if err != nil {
return nil, err
}
@@ -115,24 +129,31 @@ func GetExpectedHAReplicationSlotsOnPod(
// GetReplicationSlotsOnPod returns a slice containing the names of the current replication slots present in
// a given pod
-func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment) ([]string, error) {
+func GetReplicationSlotsOnPod(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ namespace, podName, dbName string,
+) ([]string, error) {
namespacedName := types.NamespacedName{
Namespace: namespace,
Name: podName,
}
targetPod := &corev1.Pod{}
- err := env.Client.Get(env.Ctx, namespacedName, targetPod)
+ err := crudClient.Get(ctx, namespacedName, targetPod)
if err != nil {
return nil, err
}
query := "SELECT slot_name FROM pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'"
- stdout, _, err := env.ExecQueryInInstancePod(
- PodLocator{
+ stdout, _, err := exec.QueryInInstancePod(
+ ctx, crudClient, kubeInterface, restConfig,
+ exec.PodLocator{
Namespace: targetPod.Namespace,
PodName: targetPod.Name,
},
- AppDBName,
+ exec.DatabaseName(dbName),
query)
if err != nil {
return nil, err
@@ -150,11 +171,14 @@ func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment
// GetReplicationSlotLsnsOnPod returns a slice containing the current restart_lsn values of each
// replication slot present in a given pod
func GetReplicationSlotLsnsOnPod(
- namespace, clusterName string,
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ namespace, clusterName, dbName string,
pod corev1.Pod,
- env *TestingEnvironment,
) ([]string, error) {
- slots, err := GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env)
+ slots, err := GetExpectedHAReplicationSlotsOnPod(ctx, crudClient, namespace, clusterName, pod.GetName())
if err != nil {
return nil, err
}
@@ -163,12 +187,13 @@ func GetReplicationSlotLsnsOnPod(
for _, slot := range slots {
query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'",
slot)
- restartLsn, _, err := env.ExecQueryInInstancePod(
- PodLocator{
+ restartLsn, _, err := exec.QueryInInstancePod(
+ ctx, crudClient, kubeInterface, restConfig,
+ exec.PodLocator{
Namespace: pod.Namespace,
PodName: pod.Name,
},
- AppDBName,
+ exec.DatabaseName(dbName),
query)
if err != nil {
return nil, err
@@ -179,8 +204,13 @@ func GetReplicationSlotLsnsOnPod(
}
// ToggleHAReplicationSlots sets the HA Replication Slot feature on/off depending on `enable`
-func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *TestingEnvironment) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+func ToggleHAReplicationSlots(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+ enable bool,
+) error {
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
if err != nil {
return err
}
@@ -194,7 +224,7 @@ func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *T
}
clusterToggle.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(enable)
- err = env.Client.Patch(env.Ctx, clusterToggle, ctrlclient.MergeFrom(cluster))
+ err = crudClient.Patch(ctx, clusterToggle, client.MergeFrom(cluster))
if err != nil {
return err
}
@@ -202,8 +232,13 @@ func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *T
}
// ToggleSynchronizeReplicationSlots sets the Synchronize Replication Slot feature on/off depending on `enable`
-func ToggleSynchronizeReplicationSlots(namespace, clusterName string, enable bool, env *TestingEnvironment) error {
- cluster, err := env.GetCluster(namespace, clusterName)
+func ToggleSynchronizeReplicationSlots(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+ enable bool,
+) error {
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
if err != nil {
return err
}
@@ -217,7 +252,7 @@ func ToggleSynchronizeReplicationSlots(namespace, clusterName string, enable boo
}
clusterToggle.Spec.ReplicationSlots.SynchronizeReplicas.Enabled = ptr.To(enable)
- err = env.Client.Patch(env.Ctx, clusterToggle, ctrlclient.MergeFrom(cluster))
+ err = crudClient.Patch(ctx, clusterToggle, client.MergeFrom(cluster))
if err != nil {
return err
}
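
A minimal sketch of how the refactored helpers are meant to be called now that the `TestingEnvironment` receiver is gone: the caller passes the context, controller-runtime client, clientset, and REST config explicitly. The suite wiring (`ctx`, `crudClient`, `kubeInterface`, `restConfig`) and the namespace/cluster names below are illustrative, not part of the patch.

```go
package sketch

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot"
)

// dumpSlots lists the physical slots on one instance and, on failure,
// prints the whole cluster's slot state for debugging.
func dumpSlots(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	restConfig *rest.Config,
) {
	slots, err := replicationslot.GetReplicationSlotsOnPod(
		ctx, crudClient, kubeInterface, restConfig,
		"default", "cluster-example-1", "app",
	)
	if err != nil {
		fmt.Print(replicationslot.PrintReplicationSlots(
			ctx, crudClient, kubeInterface, restConfig,
			"default", "cluster-example", "app",
		))
		return
	}
	fmt.Printf("physical slots: %v\n", slots)
}
```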
diff --git a/tests/utils/run.go b/tests/utils/run/run.go
similarity index 71%
rename from tests/utils/run.go
rename to tests/utils/run/run.go
index 7ae3092cbd..d1baa7d80e 100644
--- a/tests/utils/run.go
+++ b/tests/utils/run/run.go
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package run contains functions to execute commands locally
+package run
import (
"bytes"
@@ -26,10 +27,12 @@ import (
"github.com/avast/retry-go/v4"
"github.com/google/shlex"
"github.com/onsi/ginkgo/v2"
+
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
)
-// RunUnchecked executes a command and process the information
-func RunUnchecked(command string) (stdout string, stderr string, err error) {
+// Unchecked executes a command and processes the information
+func Unchecked(command string) (stdout string, stderr string, err error) {
tokens, err := shlex.Split(command)
if err != nil {
ginkgo.GinkgoWriter.Printf("Error parsing command `%v`: %v\n", command, err)
@@ -48,8 +51,8 @@ func RunUnchecked(command string) (stdout string, stderr string, err error) {
return
}
-// RunUncheckedRetry executes a command and process the information with retry
-func RunUncheckedRetry(command string) (stdout string, stderr string, err error) {
+// UncheckedRetry executes a command and processes the information with retry
+func UncheckedRetry(command string) (stdout string, stderr string, err error) {
var tokens []string
tokens, err = shlex.Split(command)
if err != nil {
@@ -64,8 +67,8 @@ func RunUncheckedRetry(command string) (stdout string, stderr string, err error)
cmd.Stdout, cmd.Stderr = &outBuffer, &errBuffer
return cmd.Run()
},
- retry.Delay(PollingTime*time.Second),
- retry.Attempts(RetryAttempts),
+ retry.Delay(objects.PollingTime*time.Second),
+ retry.Attempts(objects.RetryAttempts),
retry.DelayType(retry.FixedDelay),
)
stdout = outBuffer.String()
@@ -78,19 +81,7 @@ func RunUncheckedRetry(command string) (stdout string, stderr string, err error)
// Run executes a command and prints the output when terminates with an error
func Run(command string) (stdout string, stderr string, err error) {
- stdout, stderr, err = RunUnchecked(command)
-
- var exerr *exec.ExitError
- if errors.As(err, &exerr) {
- ginkgo.GinkgoWriter.Printf("RunCheck: %v\nExitCode: %v\n Out:\n%v\nErr:\n%v\n",
- command, exerr.ExitCode(), stdout, stderr)
- }
- return
-}
-
-// RunRetry executes a command with retry and prints the output when terminates with an error
-func RunRetry(command string) (stdout string, stderr string, err error) {
- stdout, stderr, err = RunUncheckedRetry(command)
+ stdout, stderr, err = Unchecked(command)
var exerr *exec.ExitError
if errors.As(err, &exerr) {
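
A short sketch contrasting the renamed entry points, assuming the signatures shown above: `run.Unchecked` executes once and leaves error handling to the caller, while `run.UncheckedRetry` polls with the package-level delay and attempt settings. The `kubectl` command is illustrative.

```go
package sketch

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
)

// listPods runs a command once, then falls back to the retrying variant,
// which tolerates transient failures such as a briefly unreachable API server.
func listPods(namespace string) {
	command := fmt.Sprintf("kubectl get pods -n %v", namespace)
	stdout, stderr, err := run.Unchecked(command)
	if err != nil {
		stdout, stderr, err = run.UncheckedRetry(command)
	}
	fmt.Println(stdout, stderr, err)
}
```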
diff --git a/tests/utils/secrets.go b/tests/utils/secrets/secrets.go
similarity index 77%
rename from tests/utils/secrets.go
rename to tests/utils/secrets/secrets.go
index c6f01b3f10..856e0d1ff2 100644
--- a/tests/utils/secrets.go
+++ b/tests/utils/secrets/secrets.go
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package secrets provides functions to manage and inspect secrets
+package secrets
import (
+ "context"
"fmt"
corev1 "k8s.io/api/core/v1"
@@ -27,15 +29,17 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
)
// CreateSecretCA generates a CA for the cluster and returns the cluster and the key pair
func CreateSecretCA(
- namespace string,
- clusterName string,
- caSecName string,
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName, caSecName string,
includeCAPrivateKey bool,
- env *TestingEnvironment) (
+) (
*apiv1.Cluster, *certs.KeyPair, error,
) {
// creating root CA certificates
@@ -43,7 +47,7 @@ func CreateSecretCA(
cluster.Namespace = namespace
cluster.Name = clusterName
secret := &corev1.Secret{}
- err := env.Client.Get(env.Ctx, client.ObjectKey{Namespace: namespace, Name: caSecName}, secret)
+ err := crudClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: caSecName}, secret)
if !apierrors.IsNotFound(err) {
return cluster, nil, err
}
@@ -58,7 +62,7 @@ func CreateSecretCA(
if !includeCAPrivateKey {
delete(caSecret.Data, certs.CAPrivateKeyKey)
}
- _, err = CreateObject(env, caSecret)
+ _, err = objects.Create(ctx, crudClient, caSecret)
if err != nil {
return cluster, caPair, err
}
@@ -67,13 +71,14 @@ func CreateSecretCA(
// GetCredentials retrieves the username and password from the secrets and returns them according to the user suffix
func GetCredentials(
- clusterName, namespace string,
- secretSuffix string,
- env *TestingEnvironment) (
+ ctx context.Context,
+ crudClient client.Client,
+ clusterName, namespace, secretSuffix string,
+) (
string, string, error,
) {
// Get the cluster
- cluster, err := env.GetCluster(namespace, clusterName)
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
if err != nil {
return "", "", err
}
@@ -94,7 +99,7 @@ func GetCredentials(
Namespace: namespace,
Name: secretName,
}
- err = env.Client.Get(env.Ctx, secretNamespacedName, secret)
+ err = crudClient.Get(ctx, secretNamespacedName, secret)
if err != nil {
return "", "", err
}
@@ -105,11 +110,10 @@ func GetCredentials(
// CreateObjectStorageSecret generates an Opaque Secret with a given ID and Key
func CreateObjectStorageSecret(
- namespace string,
- secretName string,
- id string,
- key string,
- env *TestingEnvironment,
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, secretName string,
+ id, key string,
) (*corev1.Secret, error) {
targetSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -122,7 +126,7 @@ func CreateObjectStorageSecret(
},
Type: corev1.SecretTypeOpaque,
}
- obj, err := CreateObject(env, targetSecret)
+ obj, err := objects.Create(ctx, crudClient, targetSecret)
if err != nil {
return nil, err
}
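
A minimal sketch of the new explicit-argument style for the secrets helpers: create an Opaque object-storage secret, then read the application user's credentials back. Namespaces, secret names, and keys are illustrative; `apiv1.ApplicationUserSecretSuffix` is the suffix the suite already uses elsewhere.

```go
package sketch

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
)

// seedBackupCredentials stores object-storage credentials and fetches the
// application user's username/password from the cluster's secret.
func seedBackupCredentials(ctx context.Context, crudClient client.Client) error {
	if _, err := secrets.CreateObjectStorageSecret(
		ctx, crudClient,
		"default", "backup-storage-creds",
		"minio-id", "minio-key",
	); err != nil {
		return err
	}

	user, password, err := secrets.GetCredentials(
		ctx, crudClient,
		"cluster-example", "default",
		apiv1.ApplicationUserSecretSuffix,
	)
	if err != nil {
		return err
	}
	fmt.Println("app credentials:", user, password)
	return nil
}
```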
diff --git a/tests/utils/service.go b/tests/utils/services/service.go
similarity index 72%
rename from tests/utils/service.go
rename to tests/utils/services/service.go
index cce93ca126..32acea4382 100644
--- a/tests/utils/service.go
+++ b/tests/utils/services/service.go
@@ -14,13 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package services provides functions to manage services inside Kubernetes
+package services
import (
+ "context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)
@@ -45,29 +48,19 @@ func GetReadWriteServiceName(clusterName string) string {
return fmt.Sprintf("%v%v", clusterName, apiv1.ServiceReadWriteSuffix)
}
-// GetService gets a service given name and namespace
-func GetService(namespace, name string, env *TestingEnvironment) (*corev1.Service, error) {
- namespacedName := types.NamespacedName{
- Namespace: namespace,
- Name: name,
- }
- service := &corev1.Service{}
- err := GetObject(env, namespacedName, service)
- if err != nil {
- return nil, err
- }
- return service, nil
-}
-
-// GetRwServiceObject return read write service object
-func GetRwServiceObject(namespace, clusterName string, env *TestingEnvironment) (*corev1.Service, error) {
+// getRwServiceObject returns the read-write service object
+func getRwServiceObject(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) (*corev1.Service, error) {
svcName := GetReadWriteServiceName(clusterName)
service := &corev1.Service{}
namespacedName := types.NamespacedName{
Namespace: namespace,
Name: svcName,
}
- err := env.Client.Get(env.Ctx, namespacedName, service)
+ err := crudClient.Get(ctx, namespacedName, service)
if err != nil {
return service, err
}
@@ -82,8 +75,12 @@ func CreateDSN(host, user, dbname, password string, sslmode SSLMode, port int) s
}
// GetHostName returns the fully qualified domain name of the read-write service
-func GetHostName(namespace, clusterName string, env *TestingEnvironment) (string, error) {
- rwService, err := GetRwServiceObject(namespace, clusterName, env)
+func GetHostName(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) (string, error) {
+ rwService, err := getRwServiceObject(ctx, crudClient, namespace, clusterName)
if err != nil {
return "", err
}
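
With `GetRwServiceObject` now unexported, `GetHostName` is the public entry point for resolving the read-write endpoint. A minimal sketch follows; the DSN is assembled by hand so the example does not have to assume the package's `SSLMode` constants.

```go
package sketch

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/services"
)

// printRwDSN resolves the read-write service FQDN and prints a candidate DSN.
func printRwDSN(ctx context.Context, crudClient client.Client) {
	host, err := services.GetHostName(ctx, crudClient, "default", "cluster-example")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("host=%v user=app dbname=app port=5432\n", host)
}
```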
diff --git a/tests/utils/storage.go b/tests/utils/storage/storage.go
similarity index 73%
rename from tests/utils/storage.go
rename to tests/utils/storage/storage.go
index 16883a5f05..3bc2eab59d 100644
--- a/tests/utils/storage.go
+++ b/tests/utils/storage/storage.go
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package storage provides functions to manage anything related to storage
+package storage
import (
+ "context"
"fmt"
"os"
@@ -27,18 +29,28 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
)
// GetStorageAllowExpansion returns the boolean value of the 'AllowVolumeExpansion' value of the storage class
-func GetStorageAllowExpansion(defaultStorageClass string, env *TestingEnvironment) (*bool, error) {
+func GetStorageAllowExpansion(
+ ctx context.Context,
+ crudClient client.Client,
+ defaultStorageClass string,
+) (*bool, error) {
storageClass := &storagev1.StorageClass{}
- err := GetObject(env, client.ObjectKey{Name: defaultStorageClass}, storageClass)
+ err := objects.Get(ctx, crudClient, client.ObjectKey{Name: defaultStorageClass}, storageClass)
return storageClass.AllowVolumeExpansion, err
}
// IsWalStorageEnabled returns true if 'WalStorage' is being used
-func IsWalStorageEnabled(namespace, clusterName string, env *TestingEnvironment) (bool, error) {
- cluster, err := env.GetCluster(namespace, clusterName)
+func IsWalStorageEnabled(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace, clusterName string,
+) (bool, error) {
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
if cluster.Spec.WalStorage == nil {
return false, err
}
@@ -136,3 +148,28 @@ func SetSnapshotNameAsEnv(
}
return nil
}
+
+// GetPVCList gathers the current list of PVCs in a namespace
+func GetPVCList(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*corev1.PersistentVolumeClaimList, error) {
+ pvcList := &corev1.PersistentVolumeClaimList{}
+ err := crudClient.List(
+ ctx, pvcList, client.InNamespace(namespace),
+ )
+ return pvcList, err
+}
+
+// GetSnapshotList gathers the current list of VolumeSnapshots in a namespace
+func GetSnapshotList(
+ ctx context.Context,
+ crudClient client.Client,
+ namespace string,
+) (*volumesnapshot.VolumeSnapshotList, error) {
+ list := &volumesnapshot.VolumeSnapshotList{}
+ err := crudClient.List(ctx, list, client.InNamespace(namespace))
+
+ return list, err
+}
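
A minimal sketch of the two new list helpers, assuming a wired-up controller-runtime client; the namespace is illustrative.

```go
package sketch

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
)

// dumpStorage counts the PVCs and VolumeSnapshots currently in a namespace.
func dumpStorage(ctx context.Context, crudClient client.Client) {
	if pvcs, err := storage.GetPVCList(ctx, crudClient, "default"); err == nil {
		fmt.Println("PVCs:", len(pvcs.Items))
	}
	if snapshots, err := storage.GetSnapshotList(ctx, crudClient, "default"); err == nil {
		fmt.Println("VolumeSnapshots:", len(snapshots.Items))
	}
}
```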
diff --git a/tests/utils/time.go b/tests/utils/time.go
deleted file mode 100644
index ecce38b9bd..0000000000
--- a/tests/utils/time.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-)
-
-// GetCurrentTimestamp getting current time stamp from postgres server
-func GetCurrentTimestamp(namespace, clusterName string, env *TestingEnvironment) (string, error) {
- row, err := RunQueryRowOverForward(
- env,
- namespace,
- clusterName,
- AppDBName,
- apiv1.ApplicationUserSecretSuffix,
- "select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');",
- )
- if err != nil {
- return "", err
- }
-
- var currentTimestamp string
- if err = row.Scan(&currentTimestamp); err != nil {
- return "", err
- }
-
- return currentTimestamp, nil
-}
diff --git a/tests/utils/timeouts.go b/tests/utils/timeouts/timeouts.go
similarity index 97%
rename from tests/utils/timeouts.go
rename to tests/utils/timeouts/timeouts.go
index 8edf62d1de..860a5e9df0 100644
--- a/tests/utils/timeouts.go
+++ b/tests/utils/timeouts/timeouts.go
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package timeouts contains the timeouts for the E2E test suite
+package timeouts
import (
"encoding/json"
diff --git a/tests/utils/utils.go b/tests/utils/utils.go
new file mode 100644
index 0000000000..58a81c8894
--- /dev/null
+++ b/tests/utils/utils.go
@@ -0,0 +1,170 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "text/tabwriter"
+
+ "github.com/cheynewallace/tabby"
+ batchv1 "k8s.io/api/batch/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ utils2 "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+)
+
+// PrintClusterResources prints a summary of the cluster pods, jobs, PVCs, etc.
+func PrintClusterResources(ctx context.Context, crudClient client.Client, namespace, clusterName string) string {
+ cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+ if err != nil {
+ return fmt.Sprintf("Error while Getting Object %v", err)
+ }
+
+ buffer := &bytes.Buffer{}
+ w := tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0)
+ clusterInfo := tabby.NewCustom(w)
+ clusterInfo.AddLine("Timeout while waiting for cluster ready, dumping more cluster information for analysis...")
+ clusterInfo.AddLine()
+ clusterInfo.AddLine()
+ clusterInfo.AddLine("Cluster information:")
+ clusterInfo.AddLine("Name", cluster.GetName())
+ clusterInfo.AddLine("Namespace", cluster.GetNamespace())
+ clusterInfo.AddLine()
+ clusterInfo.AddHeader("Items", "Values")
+ clusterInfo.AddLine("Spec.Instances", cluster.Spec.Instances)
+ clusterInfo.AddLine("Wal storage", cluster.ShouldCreateWalArchiveVolume())
+ clusterInfo.AddLine("Cluster phase", cluster.Status.Phase)
+ clusterInfo.AddLine("Phase reason", cluster.Status.PhaseReason)
+ clusterInfo.AddLine("Cluster target primary", cluster.Status.TargetPrimary)
+ clusterInfo.AddLine("Cluster current primary", cluster.Status.CurrentPrimary)
+ clusterInfo.AddLine()
+
+ podList, _ := clusterutils.ListPods(ctx, crudClient, cluster.GetNamespace(), cluster.GetName())
+
+ clusterInfo.AddLine("Cluster Pods information:")
+ clusterInfo.AddLine("Ready pod number: ", utils2.CountReadyPods(podList.Items))
+ clusterInfo.AddLine()
+ clusterInfo.AddHeader("Items", "Values")
+ for _, pod := range podList.Items {
+ clusterInfo.AddLine("Pod name", pod.Name)
+ clusterInfo.AddLine("Pod phase", pod.Status.Phase)
+ if cluster.Status.InstancesReportedState != nil {
+ if instanceReportState, ok := cluster.Status.InstancesReportedState[v1.PodName(pod.Name)]; ok {
+ clusterInfo.AddLine("Is Primary", instanceReportState.IsPrimary)
+ clusterInfo.AddLine("TimeLineID", instanceReportState.TimeLineID)
+ clusterInfo.AddLine("---", "---")
+ }
+ } else {
+ clusterInfo.AddLine("InstanceReportState not reported", "")
+ }
+ }
+
+ clusterInfo.AddLine("Jobs information:")
+ clusterInfo.AddLine()
+ clusterInfo.AddHeader("Items", "Values")
+ jobList := &batchv1.JobList{}
+ _ = crudClient.List(
+ ctx, jobList, client.InNamespace(namespace),
+ )
+ for _, job := range jobList.Items {
+ clusterInfo.AddLine("Job name", job.Name)
+ clusterInfo.AddLine("Job status", fmt.Sprintf("%#v", job.Status))
+ }
+
+ pvcList, _ := storage.GetPVCList(ctx, crudClient, cluster.GetNamespace())
+ clusterInfo.AddLine()
+ clusterInfo.AddLine("Cluster PVC information: (dumping all pvc under the namespace)")
+ clusterInfo.AddLine("Available Cluster PVCCount", cluster.Status.PVCCount)
+ clusterInfo.AddLine()
+ clusterInfo.AddHeader("Items", "Values")
+ for _, pvc := range pvcList.Items {
+ clusterInfo.AddLine("PVC name", pvc.Name)
+ clusterInfo.AddLine("PVC phase", pvc.Status.Phase)
+ clusterInfo.AddLine("---", "---")
+ }
+
+ snapshotList, _ := storage.GetSnapshotList(ctx, crudClient, cluster.Namespace)
+ clusterInfo.AddLine()
+ clusterInfo.AddLine("Cluster Snapshot information: (dumping all snapshot under the namespace)")
+ clusterInfo.AddLine()
+ clusterInfo.AddHeader("Items", "Values")
+ for _, snapshot := range snapshotList.Items {
+ clusterInfo.AddLine("Snapshot name", snapshot.Name)
+ if snapshot.Status.ReadyToUse != nil {
+ clusterInfo.AddLine("Snapshot ready to use", *snapshot.Status.ReadyToUse)
+ } else {
+ clusterInfo.AddLine("Snapshot ready to use", "false")
+ }
+ clusterInfo.AddLine("---", "---")
+ }
+
+ // do not remove, this is needed to ensure that the writer cache is always flushed.
+ clusterInfo.Print()
+
+ return buffer.String()
+}
+
+// ForgeArchiveWalOnMinio, instead of using `switchWalCmd` to generate a real WAL archive, directly forges a WAL
+// archive file on Minio by copying and renaming an existing WAL archive file, giving the test more control. To make
+// sure the forged file won't clash with a real WAL archive, the sequence in newWALName is chosen big enough that it
+// cannot be a real WAL archive name in an idle PostgreSQL instance.
+func ForgeArchiveWalOnMinio(namespace, clusterName, miniClientPodName, existingWALName, newWALName string) error {
+ // Forge a WAL archive by copying and renaming the 1st WAL archive
+ minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000"
+ existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz"
+ newWALNamePath := minioWALBasePath + "/" + newWALName
+ forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath
+ _, _, err := run.UncheckedRetry(fmt.Sprintf(
+ "kubectl exec -n %v %v -- %v",
+ namespace,
+ miniClientPodName,
+ forgeWALOnMinioCmd))
+
+ return err
+}
+
+// TestFileExist tests if a file specified with `fileName` exists under directory `directoryPath`, on pod `podName` in
+// namespace `namespace`
+func TestFileExist(namespace, podName, directoryPath, fileName string) bool {
+ filePath := directoryPath + "/" + fileName
+ testFileExistCommand := "test -f " + filePath
+ _, _, err := run.Unchecked(fmt.Sprintf(
+ "kubectl exec -n %v %v -- %v",
+ namespace,
+ podName,
+ testFileExistCommand))
+
+ return err == nil
+}
+
+// TestDirectoryEmpty checks the directory `directoryPath` on pod `podName` in namespace `namespace`;
+// note that it returns true when the directory exists and is not empty
+func TestDirectoryEmpty(namespace, podName, directoryPath string) bool {
+ testDirectoryEmptyCommand := "test \"$(ls -A " + directoryPath + ")\""
+ _, _, err := run.Unchecked(fmt.Sprintf(
+ "kubectl exec -n %v %v -- %v",
+ namespace,
+ podName,
+ testDirectoryEmptyCommand))
+
+ return err == nil
+}
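
A short usage sketch for the kubectl-based helpers above; every name here (namespace, pod, WAL segment names, paths) is illustrative, and the forged segment deliberately uses a high sequence number as the comment above prescribes.

```go
package sketch

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
)

// forgeAndCheck forges a WAL segment on Minio, then probes a file on a pod.
func forgeAndCheck() {
	err := utils.ForgeArchiveWalOnMinio(
		"minio", "cluster-example", "mc",
		"000000010000000000000001", "0000000100000000000000FF",
	)
	fmt.Println("forge error:", err)

	exists := utils.TestFileExist("default", "cluster-example-1", "/etc", "hostname")
	fmt.Println("file exists:", exists)
}
```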
diff --git a/tests/utils/version.go b/tests/utils/version.go
deleted file mode 100644
index 2416df4b08..0000000000
--- a/tests/utils/version.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "fmt"
-
- "github.com/cloudnative-pg/machinery/pkg/image/reference"
- "github.com/cloudnative-pg/machinery/pkg/postgres/version"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
-)
-
-// BumpPostgresImageMajorVersion returns a postgresImage incrementing the major version of the argument (if available)
-func BumpPostgresImageMajorVersion(postgresImage string) (string, error) {
- imageReference := reference.New(postgresImage)
-
- postgresImageVersion, err := version.FromTag(imageReference.Tag)
- if err != nil {
- return "", err
- }
-
- targetPostgresImageMajorVersionInt := postgresImageVersion.Major() + 1
-
- defaultImageVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
- if err != nil {
- return "", err
- }
-
- if targetPostgresImageMajorVersionInt >= defaultImageVersion.Major() {
- return postgresImage, nil
- }
-
- imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion.Major()+1)
-
- return imageReference.GetNormalizedName(), nil
-}
diff --git a/tests/utils/webapp.go b/tests/utils/webapp.go
deleted file mode 100644
index ec2cc1b4bb..0000000000
--- a/tests/utils/webapp.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/ptr"
-)
-
-// DefaultWebapp returns a struct representing a
-func DefaultWebapp(namespace string, name string, rootCASecretName string, tlsSecretName string) corev1.Pod {
- var secretMode int32 = 0o600
- seccompProfile := &corev1.SeccompProfile{
- Type: corev1.SeccompProfileTypeRuntimeDefault,
- }
-
- return corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- },
- Spec: corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: "secret-volume-root-ca",
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: rootCASecretName,
- DefaultMode: &secretMode,
- },
- },
- },
- {
- Name: "secret-volume-tls",
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: tlsSecretName,
- DefaultMode: &secretMode,
- },
- },
- },
- },
- Containers: []corev1.Container{
- {
- Name: name,
- Image: "ghcr.io/cloudnative-pg/webtest:1.6.0",
- Ports: []corev1.ContainerPort{
- {
- ContainerPort: 8080,
- },
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "secret-volume-root-ca",
- MountPath: "/etc/secrets/ca",
- },
- {
- Name: "secret-volume-tls",
- MountPath: "/etc/secrets/tls",
- },
- },
- SecurityContext: &corev1.SecurityContext{
- AllowPrivilegeEscalation: ptr.To(false),
- SeccompProfile: seccompProfile,
- },
- },
- },
- SecurityContext: &corev1.PodSecurityContext{
- SeccompProfile: seccompProfile,
- },
- },
- }
-}
diff --git a/tests/utils/yaml.go b/tests/utils/yaml/yaml.go
similarity index 59%
rename from tests/utils/yaml.go
rename to tests/utils/yaml/yaml.go
index d418643470..c04f844978 100644
--- a/tests/utils/yaml.go
+++ b/tests/utils/yaml/yaml.go
@@ -14,13 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package utils
+// Package yaml provides functions to handle YAML files
+package yaml
import (
"bytes"
"fmt"
"log"
+ "os"
+ "path/filepath"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -61,3 +68,33 @@ func ParseObjectsFromYAML(data []byte, namespace string) ([]client.Object, error
}
return objects, nil
}
+
+// GetResourceNameFromYAML returns the name of a resource in a YAML file
+func GetResourceNameFromYAML(scheme *runtime.Scheme, path string) (string, error) {
+ namespacedName, err := getResourceNamespacedNameFromYAML(scheme, path)
+ if err != nil {
+ return "", err
+ }
+ return namespacedName.Name, err
+}
+
+// getResourceNamespacedNameFromYAML returns the NamespacedName representing a resource in a YAML file
+func getResourceNamespacedNameFromYAML(
+ scheme *runtime.Scheme,
+ path string,
+) (types.NamespacedName, error) {
+ data, err := os.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return types.NamespacedName{}, err
+ }
+ decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer()
+ obj, _, err := decoder.Decode(data, nil, nil)
+ if err != nil {
+ return types.NamespacedName{}, err
+ }
+ objectMeta, err := meta.Accessor(obj)
+ if err != nil {
+ return types.NamespacedName{}, err
+ }
+ return types.NamespacedName{Namespace: objectMeta.GetNamespace(), Name: objectMeta.GetName()}, nil
+}
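
A minimal sketch of the new YAML helper, assuming the passed scheme already registers the CNPG types so that custom resources decode; the manifest path is illustrative.

```go
package sketch

import (
	"fmt"

	"k8s.io/client-go/kubernetes/scheme"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml"
)

// printResourceName decodes a manifest and prints the declared resource name.
func printResourceName() {
	name, err := yaml.GetResourceNameFromYAML(scheme.Scheme, "fixtures/cluster-example.yaml")
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("resource name:", name)
}
```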
From 895748f0dac002ffcf292cd6088bf53d5fcbcc8d Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 3 Jan 2025 13:35:01 +0100
Subject: [PATCH 277/836] test(e2e): ensure unique namespaces during parallel
tests (#6456)
Closes #6395
Signed-off-by: Armando Ruocco
---
tests/utils/environment/environment.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/tests/utils/environment/environment.go b/tests/utils/environment/environment.go
index 4a216075be..cbd9c4c971 100644
--- a/tests/utils/environment/environment.go
+++ b/tests/utils/environment/environment.go
@@ -48,7 +48,8 @@ import (
// Import the client auth plugin package to allow use gke or ake to run tests
_ "k8s.io/client-go/plugin/pkg/client/auth"
- . "github.com/onsi/gomega" // nolint
+ . "github.com/onsi/ginkgo/v2" // nolint
+ . "github.com/onsi/gomega" // nolint
)
const (
@@ -77,9 +78,10 @@ type uniqueStringSlice struct {
func (a *uniqueStringSlice) generateUniqueName(prefix string) string {
a.mu.Lock()
defer a.mu.Unlock()
+ process := GinkgoParallelProcess()
for {
- potentialUniqueName := fmt.Sprintf("%s-%d", prefix, funk.RandomInt(0, 9999))
+ potentialUniqueName := fmt.Sprintf("%s-%d-%d", prefix, process, funk.RandomInt(0, 9999))
if !slices.Contains(a.values, potentialUniqueName) {
a.values = append(a.values, potentialUniqueName)
return potentialUniqueName
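
A standalone sketch of the naming scheme introduced here, with `math/rand` as a stand-in for `funk.RandomInt`: because the Ginkgo parallel process number is embedded in the name, two workers that draw the same random suffix still produce distinct namespaces.

```go
package sketch

import (
	"fmt"
	"math/rand"
)

// uniqueName keeps drawing suffixes until it finds a name unused by this
// process; cross-process collisions are ruled out by the process component.
func uniqueName(prefix string, process int, used map[string]bool) string {
	for {
		candidate := fmt.Sprintf("%s-%d-%d", prefix, process, rand.Intn(10000))
		if !used[candidate] {
			used[candidate] = true
			return candidate
		}
	}
}
```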
From f306052e1f827acb05161123bf38ee24e7162260 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 3 Jan 2025 17:02:00 +0100
Subject: [PATCH 278/836] test(e2e): higher timeout and better error
descriptions for `switchWalAndGetLatestArchive` (#6448)
Closes #6413
Signed-off-by: Armando Ruocco
Signed-off-by: Leonardo Cecchi
Co-authored-by: Leonardo Cecchi
---
tests/e2e/asserts_test.go | 16 +++++++++++-----
tests/utils/exec/exec.go | 15 +++++++++++++++
2 files changed, 26 insertions(+), 5 deletions(-)
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 97c5758b96..c35de5258e 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -1999,15 +1999,18 @@ func AssertArchiveConditionMet(namespace, clusterName, timeout string) {
// switchWalAndGetLatestArchive triggers a new WAL and gets the name of the latest WAL file
func switchWalAndGetLatestArchive(namespace, podName string) string {
- _, _, err := exec.QueryInInstancePod(
+ _, _, err := exec.QueryInInstancePodWithTimeout(
env.Ctx, env.Client, env.Interface, env.RestClientConfig,
exec.PodLocator{
Namespace: namespace,
PodName: podName,
},
postgres.PostgresDBName,
- "CHECKPOINT;")
- Expect(err).ToNot(HaveOccurred())
+ "CHECKPOINT",
+ 300*time.Second,
+ )
+ Expect(err).ToNot(HaveOccurred(),
+ "failed to trigger a new wal while executing 'switchWalAndGetLatestArchive'")
out, _, err := exec.QueryInInstancePod(
env.Ctx, env.Client, env.Interface, env.RestClientConfig,
@@ -2016,8 +2019,11 @@ func switchWalAndGetLatestArchive(namespace, podName string) string {
PodName: podName,
},
postgres.PostgresDBName,
- "SELECT pg_walfile_name(pg_switch_wal());")
- Expect(err).ToNot(HaveOccurred())
+ "SELECT pg_walfile_name(pg_switch_wal());",
+ )
+ Expect(err).ToNot(
+ HaveOccurred(),
+ "failed to get latest wal file name while executing 'switchWalAndGetLatestArchive")
return strings.TrimSpace(out)
}
diff --git a/tests/utils/exec/exec.go b/tests/utils/exec/exec.go
index 58ca134ca6..7196a273da 100644
--- a/tests/utils/exec/exec.go
+++ b/tests/utils/exec/exec.go
@@ -107,6 +107,21 @@ func QueryInInstancePod(
query string,
) (string, string, error) {
timeout := time.Second * 10
+ return QueryInInstancePodWithTimeout(ctx, crudClient, kubeInterface, restConfig, podLocator, dbname, query, timeout)
+}
+
+// QueryInInstancePodWithTimeout executes a query in an instance pod, by connecting to the pod
+// and the postgres container, and using a local connection with the postgres user
+func QueryInInstancePodWithTimeout(
+ ctx context.Context,
+ crudClient client.Client,
+ kubeInterface kubernetes.Interface,
+ restConfig *rest.Config,
+ podLocator PodLocator,
+ dbname DatabaseName,
+ query string,
+ timeout time.Duration,
+) (string, string, error) {
return CommandInInstancePod(
ctx, crudClient, kubeInterface, restConfig,
PodLocator{
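
A minimal sketch of the new entry point, assuming the usual suite wiring: slow statements such as `CHECKPOINT` get an explicit deadline instead of the 10-second default that `QueryInInstancePod` keeps.

```go
package sketch

import (
	"context"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
)

// checkpoint issues CHECKPOINT with a generous budget for busy CI nodes.
func checkpoint(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	restConfig *rest.Config,
) error {
	_, _, err := exec.QueryInInstancePodWithTimeout(
		ctx, crudClient, kubeInterface, restConfig,
		exec.PodLocator{Namespace: "default", PodName: "cluster-example-1"},
		exec.DatabaseName("postgres"),
		"CHECKPOINT",
		300*time.Second,
	)
	return err
}
```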
From f2e5a04a6a5e2bc2e21bbf2b0994f9826123e9c9 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 3 Jan 2025 18:16:17 +0100
Subject: [PATCH 279/836] fix(managed-services): preserve user specified port
settings (#6474)
Closes #6389
Signed-off-by: Armando Ruocco
---
pkg/specs/services.go | 3 ++-
pkg/specs/services_test.go | 24 ++++++++++++++++++++++++
2 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/pkg/specs/services.go b/pkg/specs/services.go
index e7978e6c6f..454f828709 100644
--- a/pkg/specs/services.go
+++ b/pkg/specs/services.go
@@ -155,7 +155,8 @@ func BuildManagedServices(cluster apiv1.Cluster) ([]corev1.Service, error) {
SetSelectors(defaultService.Spec.Selector)
for idx := range defaultService.Spec.Ports {
- builder = builder.WithServicePort(&defaultService.Spec.Ports[idx])
+ // we preserve the user settings over the default configuration, issue: #6389
+ builder = builder.WithServicePortNoOverwrite(&defaultService.Spec.Ports[idx])
}
for key, value := range defaultService.Labels {
diff --git a/pkg/specs/services_test.go b/pkg/specs/services_test.go
index dd146fde25..86c108c7a6 100644
--- a/pkg/specs/services_test.go
+++ b/pkg/specs/services_test.go
@@ -152,6 +152,30 @@ var _ = Describe("BuildManagedServices", func() {
Expect(services[0].ObjectMeta.Labels).To(HaveKeyWithValue(utils.IsManagedLabelName, "true"))
Expect(services[0].ObjectMeta.Labels).To(HaveKeyWithValue("test-label", "test-value"))
Expect(services[0].ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation", "test-value"))
+ Expect(services[0].Spec.Ports).To(ContainElement(corev1.ServicePort{
+ Name: PostgresContainerName,
+ Protocol: corev1.ProtocolTCP,
+ TargetPort: intstr.FromInt32(postgres.ServerPort),
+ Port: postgres.ServerPort,
+ NodePort: 0,
+ }))
+ })
+
+ It("should not overwrite the user specified service port with the default one", func() {
+ cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.Spec.Ports = []corev1.ServicePort{
+ {
+ Name: PostgresContainerName,
+ Protocol: corev1.ProtocolTCP,
+ TargetPort: intstr.FromInt32(postgres.ServerPort),
+ Port: postgres.ServerPort,
+ NodePort: 5533,
+ },
+ }
+ services, err := BuildManagedServices(cluster)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(services).NotTo(BeNil())
+ Expect(services).To(HaveLen(1))
+ Expect(services[0].Spec.Ports[0].NodePort).To(Equal(int32(5533)))
})
})
})
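
The builder method `WithServicePortNoOverwrite` itself is not shown in this patch; the sketch below is a hypothetical simplification of the merge semantics the fix relies on, where a default port is appended only when the user template defines no port with the same name or number, so user fields such as a custom `NodePort` survive.

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
)

// addPortNoOverwrite appends the default port unless the user already
// declared a conflicting one; in that case the user's port wins untouched.
func addPortNoOverwrite(ports []corev1.ServicePort, def corev1.ServicePort) []corev1.ServicePort {
	for _, p := range ports {
		if p.Name == def.Name || p.Port == def.Port {
			return ports
		}
	}
	return append(ports, def)
}
```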
From 62014067abdc85f87ddfeb842a133c89e08ba8c4 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 3 Jan 2025 22:40:04 +0100
Subject: [PATCH 280/836] test(e2e): retry `AssertWritesToPrimarySucceeds` and
`AssertWritesToReplicaFails` (#6477)
Closes #6415
Signed-off-by: Armando Ruocco
---
tests/e2e/asserts_test.go | 82 ++++++++++++++++++++-------------------
1 file changed, 43 insertions(+), 39 deletions(-)
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index c35de5258e..db514be314 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -1146,54 +1146,58 @@ func AssertWritesToReplicaFails(
namespace, service, appDBName, appDBUser, appDBPass string,
) {
By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() {
- forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
- env.Ctx, env.Interface, env.RestClientConfig,
- namespace, service, appDBName, appDBUser, appDBPass,
- )
- defer func() {
- _ = conn.Close()
- forwardConn.Close()
- }()
- Expect(err).ToNot(HaveOccurred())
+ Eventually(func(g Gomega) {
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, service,
+ appDBName, appDBUser, appDBPass)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ g.Expect(err).ToNot(HaveOccurred())
- var rawValue string
- // Expect to be connected to a replica
- row := conn.QueryRow("SELECT pg_is_in_recovery()")
- err = row.Scan(&rawValue)
- Expect(err).ToNot(HaveOccurred())
- isReplica := strings.TrimSpace(rawValue)
- Expect(isReplica).To(BeEquivalentTo("true"))
+ var rawValue string
+ // Expect to be connected to a replica
+ row := conn.QueryRow("SELECT pg_is_in_recovery()")
+ err = row.Scan(&rawValue)
+ g.Expect(err).ToNot(HaveOccurred())
+ isReplica := strings.TrimSpace(rawValue)
+ g.Expect(isReplica).To(BeEquivalentTo("true"))
- // Expect to be in a read-only transaction
- _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)")
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction"))
+ // Expect to be in a read-only transaction
+ _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)")
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction"))
+ }, RetryTimeout).Should(Succeed())
})
}
func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) {
By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() {
- forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
- env.Ctx, env.Interface, env.RestClientConfig,
- namespace, service, appDBName, appDBUser, appDBPass,
- )
- defer func() {
- _ = conn.Close()
- forwardConn.Close()
- }()
- Expect(err).ToNot(HaveOccurred())
+ Eventually(func(g Gomega) {
+ forwardConn, conn, err := postgres.ForwardPSQLServiceConnection(
+ env.Ctx, env.Interface, env.RestClientConfig,
+ namespace, service,
+ appDBName, appDBUser, appDBPass)
+ defer func() {
+ _ = conn.Close()
+ forwardConn.Close()
+ }()
+ g.Expect(err).ToNot(HaveOccurred())
- var rawValue string
- // Expect to be connected to a primary
- row := conn.QueryRow("SELECT pg_is_in_recovery()")
- err = row.Scan(&rawValue)
- Expect(err).ToNot(HaveOccurred())
- isReplica := strings.TrimSpace(rawValue)
- Expect(isReplica).To(BeEquivalentTo("false"))
+ var rawValue string
+ // Expect to be connected to a primary
+ row := conn.QueryRow("SELECT pg_is_in_recovery()")
+ err = row.Scan(&rawValue)
+ g.Expect(err).ToNot(HaveOccurred())
+ isReplica := strings.TrimSpace(rawValue)
+ g.Expect(isReplica).To(BeEquivalentTo("false"))
- // Expect to be able to write
- _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)")
- Expect(err).ToNot(HaveOccurred())
+ // Expect to be able to write
+ _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)")
+ g.Expect(err).ToNot(HaveOccurred())
+ }, RetryTimeout).Should(Succeed())
})
}
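
A minimal sketch of the retry pattern adopted above: assertions go through the injected `g Gomega`, so a failed attempt retries the whole closure instead of aborting the spec. The `flakyRead` helper and the timeout are illustrative stand-ins.

```go
package sketch

import (
	"errors"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var attempts int

// flakyRead simulates an operation that fails the first two times.
func flakyRead() (string, error) {
	attempts++
	if attempts < 3 {
		return "", errors.New("transient error")
	}
	return "expected", nil
}

var _ = It("retries flaky assertions instead of aborting", func() {
	Eventually(func(g Gomega) {
		value, err := flakyRead()
		g.Expect(err).ToNot(HaveOccurred()) // a failure here retries the closure
		g.Expect(value).To(Equal("expected"))
	}, 60*time.Second).Should(Succeed())
})
```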
From e59b3d32a6a9b00a3024b43a261978f372d6a689 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 4 Jan 2025 09:28:03 +0100
Subject: [PATCH 281/836] chore(deps): update dependency golangci/golangci-lint
to v1.63.4 (main) (#6482)
---
.github/workflows/continuous-integration.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 4e52d12902..d5c39b2e61 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -17,7 +17,7 @@ on:
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.23.x"
- GOLANGCI_LINT_VERSION: "v1.62.2"
+ GOLANGCI_LINT_VERSION: "v1.63.4"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.26.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
From 92fbc45ec9f7b2cc7443191e3c8740af5793eb5c Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 4 Jan 2025 10:26:46 +0100
Subject: [PATCH 282/836] chore(deps): update dependency rook/rook to v1.16.1
(main) (#6502)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 639cb9b9c4..6ccdc1b64b 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,7 +37,7 @@ env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.26.0"
- ROOK_VERSION: "v1.16.0"
+ ROOK_VERSION: "v1.16.1"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
From 8cd4415a3d3724eaa916702f7d0b17ca51301ffb Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Sat, 4 Jan 2025 11:29:00 +0100
Subject: [PATCH 283/836] test(e2e): remove redundant
`AssertPostgresNoPendingRestart` calls (#6435)
Closes #6410
Signed-off-by: Armando Ruocco
Signed-off-by: Francesco Canovai
Co-authored-by: Francesco Canovai
---
tests/e2e/asserts_test.go | 34 --------------------------
tests/e2e/configuration_update_test.go | 14 -----------
2 files changed, 48 deletions(-)
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index db514be314..2c4806bc06 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -2684,40 +2684,6 @@ func DeleteResourcesFromFile(namespace, sampleFilePath string) error {
return nil
}
-// Assert in the giving cluster, all the postgres db has no pending restart
-func AssertPostgresNoPendingRestart(namespace, clusterName string, timeout int) {
- By("waiting for all pods have no pending restart", func() {
- podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
- Expect(err).ToNot(HaveOccurred())
- query := "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)"
- // Check that the new parameter has been modified in every pod
- Eventually(func() (bool, error) {
- noPendingRestart := true
- for _, pod := range podList.Items {
- stdout, _, err := exec.QueryInInstancePod(
- env.Ctx, env.Client, env.Interface, env.RestClientConfig,
- exec.PodLocator{
- Namespace: pod.Namespace,
- PodName: pod.Name,
- },
- postgres.PostgresDBName,
- query)
- if err != nil {
- return false, nil
- }
- if strings.Trim(stdout, "\n") == "f" {
- continue
- }
-
- noPendingRestart = false
- break
- }
- return noPendingRestart, nil
- }, timeout, 2).Should(BeEquivalentTo(true),
- "all pods in cluster has no pending restart")
- })
-}
-
func AssertBackupConditionTimestampChangedInClusterStatus(
namespace,
clusterName string,
diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go
index 4918d6d755..46b7117b41 100644
--- a/tests/e2e/configuration_update_test.go
+++ b/tests/e2e/configuration_update_test.go
@@ -156,9 +156,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
Expect(err).ToNot(HaveOccurred())
By("apply configuration update", func() {
- // Update the configuration
updateClusterPostgresParams(postgresParams, namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, 300)
})
By("verify that work_mem result as expected", func() {
@@ -196,9 +194,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
})
By("apply configuration update", func() {
- // Update the configuration
updateClusterPostgresPgHBA(namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, 300)
})
By("verify that connections succeed after pg_hba_reload", func() {
@@ -239,10 +235,8 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
oldPrimary := cluster.Status.CurrentPrimary
By("apply configuration update", func() {
- // Update the configuration
postgresParams["shared_buffers"] = "256MB"
updateClusterPostgresParams(postgresParams, namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, timeout)
})
By("verify that shared_buffers setting changed", func() {
@@ -282,11 +276,9 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
oldPrimary := cluster.Status.CurrentPrimary
By("apply configuration update", func() {
- // Update the configuration
postgresParams["max_replication_slots"] = "16"
postgresParams["maintenance_work_mem"] = "128MB"
updateClusterPostgresParams(postgresParams, namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, timeout)
})
By("verify that both parameters have been modified in each pod", func() {
@@ -358,7 +350,6 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
delete(postgresParams, "port")
postgresParams["max_connections"] = "105"
updateClusterPostgresParams(postgresParams, namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, timeout)
})
By("verify that max_connections has been decreased in every pod", func() {
@@ -402,10 +393,8 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
oldPrimary := cluster.Status.CurrentPrimary
By("apply configuration update", func() {
- // Update the configuration
delete(postgresParams, "max_connections")
updateClusterPostgresParams(postgresParams, namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, timeout)
})
By("verify that the max_connections has been set to default in every pod", func() {
@@ -457,9 +446,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada
})
By("apply configuration update", func() {
- // Update the configuration
updateClusterPostgresPgIdent(namespace)
- AssertPostgresNoPendingRestart(namespace, clusterName, 300)
})
By("verify that there are now two entries in pg_ident_file_mappings", func() {
@@ -661,7 +648,6 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La
}, 160).Should(BeEquivalentTo(10))
}
})
- AssertPostgresNoPendingRestart(namespace, clusterName, 120)
})
})
})
From cd3e74a8aca6a3309813078432bd11c46a587008 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 4 Jan 2025 14:27:31 +0100
Subject: [PATCH 284/836] chore(deps): update module
github.com/goreleaser/goreleaser to v2.5.1 (main) (#6506)
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 0ebe229185..d08b303300 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
-GORELEASER_VERSION ?= v2.5.0
+GORELEASER_VERSION ?= v2.5.1
SPELLCHECK_VERSION ?= 0.45.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.38.0
From d783035474ef63a4bdf36470211bec0f5a7e014d Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 4 Jan 2025 19:20:06 +0100
Subject: [PATCH 285/836] fix(deps): update github.com/cloudnative-pg/machinery
digest to 95c37fe (main) (#6490)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 8ddc9591f2..279ca436cd 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc
- github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607
+ github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
diff --git a/go.sum b/go.sum
index 22f177486f..3572a565e4 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrE
github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY=
-github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607 h1:Jymgt/H6iNoUZCqF6YtOqE2GgQIM1e1tWjT42B6vPJs=
-github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607/go.mod h1:n6br6GuNXcwYI5SgRArt9rM2hgZ1ElZr4vkJCWfiC/U=
+github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww=
+github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0/go.mod h1:pitcj6ztiuxfSFH5EbVHv8iCVxF+yQkzf9o9A1KoDvI=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
From 995f3e247d8826687d556567488fa01fd9010d44 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 18:08:27 +0100
Subject: [PATCH 286/836] fix(deps): update module golang.org/x/term to v0.28.0
(main) (#6520)
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index 279ca436cd..f163185fe6 100644
--- a/go.mod
+++ b/go.mod
@@ -37,7 +37,7 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/term v0.27.0
+ golang.org/x/term v0.28.0
google.golang.org/grpc v1.69.2
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.0
@@ -104,7 +104,7 @@ require (
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
- golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/sys v0.29.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.28.0 // indirect
diff --git a/go.sum b/go.sum
index 3572a565e4..db30e2b287 100644
--- a/go.sum
+++ b/go.sum
@@ -240,10 +240,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
From 17d5bd1c45a14da35e7e55c86a30c870a8d5be6e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 6 Jan 2025 21:16:14 +0100
Subject: [PATCH 287/836] fix(deps): update
github.com/cloudnative-pg/barman-cloud digest to c147262 (main) (#6516)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index f163185fe6..5058c8e429 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
github.com/avast/retry-go/v4 v4.6.0
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
- github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a
+ github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc
github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
diff --git a/go.sum b/go.sum
index db30e2b287..ff99792126 100644
--- a/go.sum
+++ b/go.sum
@@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54=
github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
-github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU=
-github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
+github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 h1:u11mKIHmbEGQWLsAb5hguwgGOOddA8lpPFAViBpbkt8=
+github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY=
github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww=
From 1d0f8ca652a4b8a8bb70625ca45432a862bc8e04 Mon Sep 17 00:00:00 2001
From: Brad Holland
Date: Tue, 7 Jan 2025 03:08:06 -0500
Subject: [PATCH 288/836] docs: fix typo in prometheus rules example (#6473)
* fix typo in sample alert description
* remove trailing white spaces
Closes #6499
Signed-off-by: Brad Holland
---
docs/src/samples/monitoring/prometheusrule.yaml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/src/samples/monitoring/prometheusrule.yaml b/docs/src/samples/monitoring/prometheusrule.yaml
index b74c66590b..eb344769af 100644
--- a/docs/src/samples/monitoring/prometheusrule.yaml
+++ b/docs/src/samples/monitoring/prometheusrule.yaml
@@ -26,7 +26,7 @@ spec:
severity: warning
- alert: PGDatabaseXidAge
annotations:
- description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }}
+ description: Over 300,000,000 transactions from frozen xid on pod {{ $labels.pod }}
summary: Number of transactions from the frozen XID to the current one
expr: |-
cnpg_pg_database_xid_age > 300000000
@@ -42,7 +42,7 @@ spec:
for: 1m
labels:
severity: warning
- - alert: LastFailedArchiveTime
+ - alert: LastFailedArchiveTime
annotations:
description: Archiving failed for {{ $labels.pod }}
summary: Checks the last time archiving failed. Will be < 0 when it has not failed.
@@ -51,7 +51,7 @@ spec:
for: 1m
labels:
severity: warning
- - alert: DatabaseDeadlockConflicts
+ - alert: DatabaseDeadlockConflicts
annotations:
description: There are over 10 deadlock conflicts in {{ $labels.pod }}
summary: Checks the number of database conflicts
From a3369be679a818477bec0d99840bd1c205be8651 Mon Sep 17 00:00:00 2001
From: samtoxie
Date: Tue, 7 Jan 2025 09:56:21 +0100
Subject: [PATCH 289/836] doc: add kubectl plugin instructions for the Arch
 User Repository (AUR) (#4745)
Add instructions to install the kubectl-cnpg plugin for Arch users
using the AUR infrastructure.
Signed-off-by: Sam Toxopeus
---
docs/src/kubectl-plugin.md | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index d001a3397e..2637b250a3 100644
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -80,6 +80,22 @@ Installed size: 78 M
Is this ok [y/N]: y
```
+### Using the Arch Linux User Repository (AUR) Package
+
+To install the plugin from the [AUR](https://aur.archlinux.org/packages/kubectl-cnpg), follow these steps:
+
+```sh
+git clone https://aur.archlinux.org/kubectl-cnpg.git
+cd kubectl-cnpg
+makepkg -si
+```
+
+Or use your favourite AUR-helper, for example [paru](https://github.com/Morganamilo/paru):
+
+```sh
+paru -S kubectl-cnpg
+```
+
### Using Krew
If you already have [Krew](https://krew.sigs.k8s.io/) installed, you can simply
From f115e4816689a5b5817e29635129fb8edd79e233 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Tue, 7 Jan 2025 10:19:28 +0100
Subject: [PATCH 290/836] chore: fix spell checker (#6526)
Signed-off-by: Leonardo Cecchi
---
.wordlist-en-custom.txt | 2 ++
docs/src/kubectl-plugin.md | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 16a65fde9c..bb773940fd 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -2,6 +2,7 @@ AES
API's
APIs
ARMv
+AUR
AZ
AZs
AcolumnName
@@ -1026,6 +1027,7 @@ ownerMetadata
ownerReference
packagemanifests
parseable
+paru
passfile
passwd
passwordSecret
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index 2637b250a3..485e1d9aed 100644
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -90,7 +90,7 @@ cd kubectl-cnpg
makepkg -si
```
-Or use your favourite AUR-helper, for example [paru](https://github.com/Morganamilo/paru):
+Or use your favorite AUR-helper, for example [paru](https://github.com/Morganamilo/paru):
```sh
paru -S kubectl-cnpg
From 1f115523d03f77d5b1e3e276039b534636bff867 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Tue, 7 Jan 2025 10:40:18 +0100
Subject: [PATCH 291/836] chore(doc): improve compatibility pkg documentation
(#6495)
The doc comment on `SetCoredumpFilter` in the Darwin build incorrectly
referred to Windows.
This change also takes the opportunity to add the package's doc.go file.
Signed-off-by: Armando Ruocco
---
pkg/system/compatibility/darwin.go | 3 +--
pkg/system/compatibility/doc.go | 18 ++++++++++++++++++
2 files changed, 19 insertions(+), 2 deletions(-)
create mode 100644 pkg/system/compatibility/doc.go
diff --git a/pkg/system/compatibility/darwin.go b/pkg/system/compatibility/darwin.go
index 83efd17ac3..64c76e9e24 100644
--- a/pkg/system/compatibility/darwin.go
+++ b/pkg/system/compatibility/darwin.go
@@ -17,10 +17,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package compatibility provides a layer to cross-compile with other OS than Linux
package compatibility
-// SetCoredumpFilter for Windows compatibility
+// SetCoredumpFilter for darwin compatibility
func SetCoredumpFilter(_ string) error {
return nil
}
diff --git a/pkg/system/compatibility/doc.go b/pkg/system/compatibility/doc.go
new file mode 100644
index 0000000000..5777cdce6d
--- /dev/null
+++ b/pkg/system/compatibility/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package compatibility provides a compatibility layer for cross-compiling to operating systems other than Linux
+package compatibility
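The pattern above is the repository's cross-compilation layer: each operating system gets its own file selected by Go build constraints, and non-Linux builds compile no-op stubs such as the Darwin one, so the operator can still be built for development on macOS. Below is a minimal sketch of what the Linux counterpart of such a stub looks like under this pattern; the build tag is standard Go, but the /proc path and function body are illustrative assumptions, not the repository's actual implementation.

```go
//go:build linux

package compatibility

import "os"

// SetCoredumpFilter writes the given bitmask to the kernel's per-process
// coredump filter (the /proc path is an illustrative assumption).
func SetCoredumpFilter(coredumpFilter string) error {
	return os.WriteFile("/proc/self/coredump_filter", []byte(coredumpFilter), 0o600)
}
```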
From cc91f9380f2f787955d2b0d482c70ec8d5773608 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Tue, 7 Jan 2025 10:49:22 +0100
Subject: [PATCH 292/836] chore: introduce `stale` GitHub action (#6525)
Closes #6524
Signed-off-by: Gabriele Bartolini
---
.github/workflows/stale.yml | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
create mode 100644 .github/workflows/stale.yml
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000..6e3ae0010c
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,23 @@
+# See https://github.com/marketplace/actions/close-stale-issues
+name: Close inactive issues
+on:
+ schedule:
+ - cron: "30 1 * * *"
+
+jobs:
+ close-issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ #pull-requests: write
+ steps:
+ - uses: actions/stale@v5
+ with:
+ days-before-issue-stale: 60
+ days-before-issue-close: 14
+ stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
+ close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
+ days-before-pr-stale: -1
+ days-before-pr-close: -1
+ # Comment next line before going to production
+ debug-only: true
From b43d7738ee6425420b4bc404fda0fd7f64d01898 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Tue, 7 Jan 2025 11:31:25 +0100
Subject: [PATCH 293/836] ci: update stale GitHub action (#6529)
Signed-off-by: Leonardo Cecchi
---
.github/workflows/{stale.yml => close-inactive-issues.yml} | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
rename .github/workflows/{stale.yml => close-inactive-issues.yml} (93%)
diff --git a/.github/workflows/stale.yml b/.github/workflows/close-inactive-issues.yml
similarity index 93%
rename from .github/workflows/stale.yml
rename to .github/workflows/close-inactive-issues.yml
index 6e3ae0010c..9200682e65 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -1,6 +1,7 @@
# See https://github.com/marketplace/actions/close-stale-issues
name: Close inactive issues
on:
+ workflow_dispatch:
schedule:
- cron: "30 1 * * *"
@@ -11,7 +12,7 @@ jobs:
issues: write
#pull-requests: write
steps:
- - uses: actions/stale@v5
+ - uses: actions/stale@v9
with:
days-before-issue-stale: 60
days-before-issue-close: 14
From b336f16314f2bd0c994beb48f089d64c8a9205b3 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Tue, 7 Jan 2025 11:53:04 +0100
Subject: [PATCH 294/836] chore(ci): remove debug-only flag from stale GitHub
 action (#6531)
Signed-off-by: Leonardo Cecchi
---
.github/workflows/close-inactive-issues.yml | 2 --
1 file changed, 2 deletions(-)
diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index 9200682e65..df969a7bb4 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -20,5 +20,3 @@ jobs:
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
- # Comment next line before going to production
- debug-only: true
From 65d27ffcf17c16a0e1e83caa626879c634cc46b2 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Tue, 7 Jan 2025 14:01:36 +0100
Subject: [PATCH 295/836] ci(workflow): set the right comment for stale issues
 (#6533)
Signed-off-by: Gabriele Bartolini
---
.github/workflows/close-inactive-issues.yml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index df969a7bb4..3ee8af2c20 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -16,7 +16,8 @@ jobs:
with:
days-before-issue-stale: 60
days-before-issue-close: 14
- stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
+ stale-issue-message: "This issue is stale because it has been open for 60 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
+ ascending: true
From 14427a6bee09ff9be9ae3f54673e22361301c6b0 Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Wed, 8 Jan 2025 10:35:32 +0100
Subject: [PATCH 296/836] change color to green if replication slots are
enabled (#6534)
Fix #6459
Signed-off-by: Pierrick
---
internal/cmd/plugin/status/status.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go
index c27018f386..4d07622063 100644
--- a/internal/cmd/plugin/status/status.go
+++ b/internal/cmd/plugin/status/status.go
@@ -655,7 +655,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatus(verbosity int) {
}
if fullStatus.areReplicationSlotsEnabled() {
- fmt.Println(aurora.Yellow("Replication Slots Enabled").String())
+ fmt.Println(aurora.Green("Replication Slots Enabled").String())
}
status := tabby.New()
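For context, `aurora` wraps strings in ANSI color escape sequences, so the change above only alters the escape code emitted around the message. A tiny, self-contained sketch of the call pattern (the module path/version and the status flag are assumptions):

```go
package main

import (
	"fmt"

	"github.com/logrusorgru/aurora/v4"
)

func main() {
	slotsEnabled := true // hypothetical status flag
	if slotsEnabled {
		// Green now signals the expected, healthy configuration.
		fmt.Println(aurora.Green("Replication Slots Enabled").String())
	} else {
		fmt.Println(aurora.Yellow("Replication Slots Disabled").String())
	}
}
```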
From ec6a00eec201bdfb4c41b5e46ed2d7ff80732a5d Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Wed, 8 Jan 2025 16:07:00 +0100
Subject: [PATCH 297/836] fix: restore functionality of promote plugin command
(#6476)
This pull request addresses an issue introduced in the latest plugin
release, which rendered the `promote` command ineffective. The patch
restores the command's functionality and adds a unit test to prevent the
issue from recurring.
Closes #6475
## Release notes
Resolved an issue that prevented the `promote` plugin command from working.
Signed-off-by: Jaime Silvela
Signed-off-by: Armando Ruocco
Signed-off-by: Leonardo Cecchi
Co-authored-by: Armando Ruocco
Co-authored-by: Leonardo Cecchi
Co-authored-by: Gabriele Quaresima
---
internal/cmd/plugin/promote/cmd.go | 2 +-
internal/cmd/plugin/promote/promote.go | 36 +++----
internal/cmd/plugin/promote/promote_test.go | 76 +++++++++++++
internal/cmd/plugin/promote/suite_test.go | 30 ++++++
internal/cmd/plugin/restart/restart.go | 9 +-
internal/controller/cluster_status.go | 8 +-
.../controller/instance_controller.go | 14 ++-
pkg/resources/status/phase.go | 101 ------------------
pkg/resources/status/transactions.go | 58 ++++++++++
pkg/resources/status/update.go | 20 +++-
10 files changed, 220 insertions(+), 134 deletions(-)
create mode 100644 internal/cmd/plugin/promote/promote_test.go
create mode 100644 internal/cmd/plugin/promote/suite_test.go
delete mode 100644 pkg/resources/status/phase.go
create mode 100644 pkg/resources/status/transactions.go
diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go
index 111c4291b0..c8e0db5bdc 100644
--- a/internal/cmd/plugin/promote/cmd.go
+++ b/internal/cmd/plugin/promote/cmd.go
@@ -40,7 +40,7 @@ func NewCmd() *cobra.Command {
if _, err := strconv.Atoi(args[1]); err == nil {
node = fmt.Sprintf("%s-%s", clusterName, node)
}
- return Promote(ctx, clusterName, node)
+ return Promote(ctx, plugin.Client, plugin.Namespace, clusterName, node)
},
}
diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go
index daba43bf78..a2e160cb37 100644
--- a/internal/cmd/plugin/promote/promote.go
+++ b/internal/cmd/plugin/promote/promote.go
@@ -26,18 +26,19 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
"github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
)
-// Promote command implementation
-func Promote(ctx context.Context, clusterName string, serverName string) error {
+// Promote promotes an instance in a cluster
+func Promote(ctx context.Context, cli client.Client,
+ namespace, clusterName, serverName string,
+) error {
var cluster apiv1.Cluster
// Get the Cluster object
- err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster)
+ err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, &cluster)
if err != nil {
- return fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err)
+ return fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, namespace, err)
}
// If server name is equal to target primary, there is no need to promote
@@ -49,22 +50,21 @@ func Promote(ctx context.Context, clusterName string, serverName string) error {
// Check if the Pod exist
var pod v1.Pod
- err = plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: serverName}, &pod)
+ err = cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: serverName}, &pod)
if err != nil {
- return fmt.Errorf("new primary node %s not found in namespace %s: %w", serverName, plugin.Namespace, err)
+ return fmt.Errorf("new primary node %s not found in namespace %s: %w", serverName, namespace, err)
}
- // The Pod exists, let's update status fields
- origCluster := cluster.DeepCopy()
- cluster.Status.TargetPrimary = serverName
- cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp()
- if err := status.RegisterPhaseWithOrigCluster(
- ctx,
- plugin.Client,
- &cluster,
- origCluster,
- apiv1.PhaseSwitchover,
- fmt.Sprintf("Switching over to %v", serverName),
+ // The Pod exists, let's update the cluster's status with the new target primary
+ reconcileTargetPrimaryFunc := func(cluster *apiv1.Cluster) {
+ cluster.Status.TargetPrimary = serverName
+ cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp()
+ cluster.Status.Phase = apiv1.PhaseSwitchover
+ cluster.Status.PhaseReason = fmt.Sprintf("Switching over to %v", serverName)
+ }
+ if err := status.PatchWithOptimisticLock(ctx, cli, &cluster,
+ reconcileTargetPrimaryFunc,
+ status.SetClusterReadyConditionTX,
); err != nil {
return err
}
diff --git a/internal/cmd/plugin/promote/promote_test.go b/internal/cmd/plugin/promote/promote_test.go
new file mode 100644
index 0000000000..46ba5ff1a6
--- /dev/null
+++ b/internal/cmd/plugin/promote/promote_test.go
@@ -0,0 +1,76 @@
+package promote
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ k8client "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("promote subcommand tests", func() {
+ var client k8client.Client
+ const namespace = "theNamespace"
+ BeforeEach(func() {
+ cluster1 := apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster1",
+ Namespace: namespace,
+ },
+ Spec: apiv1.ClusterSpec{},
+ Status: apiv1.ClusterStatus{
+ CurrentPrimary: "cluster1-1",
+ TargetPrimary: "cluster1-1",
+ Phase: apiv1.PhaseHealthy,
+ Conditions: []metav1.Condition{
+ {
+ Type: string(apiv1.ConditionClusterReady),
+ Status: metav1.ConditionTrue,
+ Reason: string(apiv1.ClusterReady),
+ Message: "Cluster is Ready",
+ },
+ },
+ },
+ }
+ newPod := corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster1-2",
+ Namespace: namespace,
+ },
+ }
+ client = fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()).
+ WithObjects(&cluster1, &newPod).WithStatusSubresource(&cluster1).Build()
+ })
+
+ It("correctly sets the target primary and the phase if the target pod is present", func(ctx SpecContext) {
+ Expect(Promote(ctx, client, namespace, "cluster1", "cluster1-2")).
+ To(Succeed())
+ var cl apiv1.Cluster
+ Expect(client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: "cluster1"}, &cl)).
+ To(Succeed())
+ Expect(cl.Status.TargetPrimary).To(Equal("cluster1-2"))
+ Expect(cl.Status.Phase).To(Equal(apiv1.PhaseSwitchover))
+ Expect(cl.Status.PhaseReason).To(Equal("Switching over to cluster1-2"))
+ Expect(meta.IsStatusConditionTrue(cl.Status.Conditions, string(apiv1.ConditionClusterReady))).
+ To(BeFalse())
+ })
+
+ It("ignores the promotion if the target pod is missing", func(ctx SpecContext) {
+ err := Promote(ctx, client, namespace, "cluster1", "cluster1-missingPod")
+ Expect(err).To(HaveOccurred())
+ var cl apiv1.Cluster
+ Expect(client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: "cluster1"}, &cl)).
+ To(Succeed())
+ Expect(cl.Status.TargetPrimary).To(Equal("cluster1-1"))
+ Expect(cl.Status.Phase).To(Equal(apiv1.PhaseHealthy))
+ Expect(meta.IsStatusConditionTrue(cl.Status.Conditions, string(apiv1.ConditionClusterReady))).
+ To(BeTrue())
+ })
+})
diff --git a/internal/cmd/plugin/promote/suite_test.go b/internal/cmd/plugin/promote/suite_test.go
new file mode 100644
index 0000000000..3c0e363370
--- /dev/null
+++ b/internal/cmd/plugin/promote/suite_test.go
@@ -0,0 +1,30 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package promote
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestPlugin(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "Promote plugin Suite")
+}
diff --git a/internal/cmd/plugin/restart/restart.go b/internal/cmd/plugin/restart/restart.go
index ba2484546a..4b863e426e 100644
--- a/internal/cmd/plugin/restart/restart.go
+++ b/internal/cmd/plugin/restart/restart.go
@@ -66,17 +66,14 @@ func instanceRestart(ctx context.Context, clusterName, node string) error {
if err != nil {
return err
}
- originalCluster := cluster.DeepCopy()
if cluster.Status.CurrentPrimary == node {
- cluster.ManagedFields = nil
- if err := status.RegisterPhaseWithOrigCluster(
+ if err := status.PatchWithOptimisticLock(
ctx,
plugin.Client,
&cluster,
- originalCluster,
- apiv1.PhaseInplacePrimaryRestart,
- "Requested by the user",
+ status.SetPhaseTX(apiv1.PhaseInplacePrimaryRestart, "Requested by the user"),
+ status.SetClusterReadyConditionTX,
); err != nil {
return fmt.Errorf("while requesting restart on primary POD for cluster %v: %w", clusterName, err)
}
diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go
index 4f5a2e673a..a092f955c2 100644
--- a/internal/controller/cluster_status.go
+++ b/internal/controller/cluster_status.go
@@ -732,7 +732,13 @@ func (r *ClusterReconciler) RegisterPhase(ctx context.Context,
phase string,
reason string,
) error {
- return status.RegisterPhase(ctx, r.Client, cluster, phase, reason)
+ return status.PatchWithOptimisticLock(
+ ctx,
+ r.Client,
+ cluster,
+ status.SetPhaseTX(phase, reason),
+ status.SetClusterReadyConditionTX,
+ )
}
// updateClusterStatusThatRequiresInstancesState updates all the cluster status fields that require the instances status
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index dda32b0920..d6900697c1 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -327,12 +327,12 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested(
return true, err
}
- return true, clusterstatus.RegisterPhase(
+ return true, clusterstatus.PatchWithOptimisticLock(
ctx,
r.client,
cluster,
- apiv1.PhaseHealthy,
- "Primary instance restarted in-place",
+ clusterstatus.SetPhaseTX(apiv1.PhaseHealthy, "Primary instance restarted in-place"),
+ clusterstatus.SetClusterReadyConditionTX,
)
}
return false, nil
@@ -1065,7 +1065,13 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con
return nil
}
- return clusterstatus.RegisterPhase(ctx, r.client, cluster, phase, phaseReason)
+ return clusterstatus.PatchWithOptimisticLock(
+ ctx,
+ r.client,
+ cluster,
+ clusterstatus.SetPhaseTX(phase, phaseReason),
+ clusterstatus.SetClusterReadyConditionTX,
+ )
}
// refreshCertificateFilesFromSecret receive a secret and rewrite the file
diff --git a/pkg/resources/status/phase.go b/pkg/resources/status/phase.go
deleted file mode 100644
index bac80933c5..0000000000
--- a/pkg/resources/status/phase.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package status
-
-import (
- "context"
- "fmt"
-
- "github.com/cloudnative-pg/machinery/pkg/log"
- "k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-)
-
-// RegisterPhase update phase in the status cluster with the
-// proper reason
-func RegisterPhase(
- ctx context.Context,
- cli client.Client,
- cluster *apiv1.Cluster,
- phase string,
- reason string,
-) error {
- existingCluster := cluster.DeepCopy()
- return RegisterPhaseWithOrigCluster(ctx, cli, cluster, existingCluster, phase, reason)
-}
-
-// RegisterPhaseWithOrigCluster update phase in the status cluster with the
-// proper reason, it also receives an origCluster to preserve other modifications done to the status
-func RegisterPhaseWithOrigCluster(
- ctx context.Context,
- cli client.Client,
- modifiedCluster *apiv1.Cluster,
- origCluster *apiv1.Cluster,
- phase string,
- reason string,
-) error {
- if err := PatchWithOptimisticLock(
- ctx,
- cli,
- modifiedCluster,
- func(cluster *apiv1.Cluster) {
- if cluster.Status.Conditions == nil {
- cluster.Status.Conditions = []metav1.Condition{}
- }
-
- cluster.Status.Phase = phase
- cluster.Status.PhaseReason = reason
-
- condition := metav1.Condition{
- Type: string(apiv1.ConditionClusterReady),
- Status: metav1.ConditionFalse,
- Reason: string(apiv1.ClusterIsNotReady),
- Message: "Cluster Is Not Ready",
- }
-
- if cluster.Status.Phase == apiv1.PhaseHealthy {
- condition = metav1.Condition{
- Type: string(apiv1.ConditionClusterReady),
- Status: metav1.ConditionTrue,
- Reason: string(apiv1.ClusterReady),
- Message: "Cluster is Ready",
- }
- }
-
- meta.SetStatusCondition(&cluster.Status.Conditions, condition)
- },
- ); err != nil {
- return fmt.Errorf("while updating phase: %w", err)
- }
-
- contextLogger := log.FromContext(ctx)
-
- modifiedPhase := modifiedCluster.Status.Phase
- origPhase := origCluster.Status.Phase
-
- if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy {
- contextLogger.Info("Cluster is not healthy")
- }
- if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy {
- contextLogger.Info("Cluster is healthy")
- }
-
- return nil
-}
diff --git a/pkg/resources/status/transactions.go b/pkg/resources/status/transactions.go
new file mode 100644
index 0000000000..ca04d437a9
--- /dev/null
+++ b/pkg/resources/status/transactions.go
@@ -0,0 +1,58 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+// SetClusterReadyConditionTX updates the cluster's readiness condition
+// according to the cluster phase
+func SetClusterReadyConditionTX(cluster *apiv1.Cluster) {
+ if cluster.Status.Conditions == nil {
+ cluster.Status.Conditions = []metav1.Condition{}
+ }
+
+ condition := metav1.Condition{
+ Type: string(apiv1.ConditionClusterReady),
+ Status: metav1.ConditionFalse,
+ Reason: string(apiv1.ClusterIsNotReady),
+ Message: "Cluster Is Not Ready",
+ }
+
+ if cluster.Status.Phase == apiv1.PhaseHealthy {
+ condition = metav1.Condition{
+ Type: string(apiv1.ConditionClusterReady),
+ Status: metav1.ConditionTrue,
+ Reason: string(apiv1.ClusterReady),
+ Message: "Cluster is Ready",
+ }
+ }
+
+ meta.SetStatusCondition(&cluster.Status.Conditions, condition)
+}
+
+// SetPhaseTX is a transaction that sets the cluster phase and reason
+func SetPhaseTX(phase string, reason string) func(cluster *apiv1.Cluster) {
+ return func(cluster *apiv1.Cluster) {
+ cluster.Status.Phase = phase
+ cluster.Status.PhaseReason = reason
+ }
+}
diff --git a/pkg/resources/status/update.go b/pkg/resources/status/update.go
index 0543292d9e..3f916cd0e1 100644
--- a/pkg/resources/status/update.go
+++ b/pkg/resources/status/update.go
@@ -20,6 +20,7 @@ import (
"context"
"fmt"
+ "github.com/cloudnative-pg/machinery/pkg/log"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -28,19 +29,23 @@ import (
)
// PatchWithOptimisticLock updates the status of the cluster using the passed
-// transaction function.
+// transaction functions (in the given order).
// Important: after successfully updating the status, this
// function refreshes it into the passed cluster
func PatchWithOptimisticLock(
ctx context.Context,
c client.Client,
cluster *apiv1.Cluster,
- tx func(cluster *apiv1.Cluster),
+ txs ...func(cluster *apiv1.Cluster),
) error {
if cluster == nil {
return nil
}
+ contextLogger := log.FromContext(ctx)
+
+ origCluster := cluster.DeepCopy()
+
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var currentCluster apiv1.Cluster
if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &currentCluster); err != nil {
@@ -48,7 +53,9 @@ func PatchWithOptimisticLock(
}
updatedCluster := currentCluster.DeepCopy()
- tx(updatedCluster)
+ for _, tx := range txs {
+ tx(updatedCluster)
+ }
if equality.Semantic.DeepEqual(currentCluster.Status, updatedCluster.Status) {
return nil
@@ -69,5 +76,12 @@ func PatchWithOptimisticLock(
return fmt.Errorf("while updating conditions: %w", err)
}
+ if cluster.Status.Phase != apiv1.PhaseHealthy && origCluster.Status.Phase == apiv1.PhaseHealthy {
+ contextLogger.Info("Cluster has become unhealthy")
+ }
+ if cluster.Status.Phase == apiv1.PhaseHealthy && origCluster.Status.Phase != apiv1.PhaseHealthy {
+ contextLogger.Info("Cluster has become healthy")
+ }
+
return nil
}
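The patch above replaces the ad-hoc `RegisterPhase` helpers with small, composable status transactions: `PatchWithOptimisticLock` re-reads the cluster, applies each transaction in order to a fresh copy of the status, and retries on conflict. A minimal sketch of the new calling convention, grounded in the signatures introduced in this diff (the wrapper function and its name are illustrative):

```go
package example // illustrative wrapper around the API introduced above

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
)

// markSwitchover sets the switchover phase and recomputes the readiness
// condition in a single optimistic-locked status patch.
func markSwitchover(ctx context.Context, cli client.Client, cluster *apiv1.Cluster, target string) error {
	return status.PatchWithOptimisticLock(
		ctx,
		cli,
		cluster,
		// Each transaction mutates the status copy, in order.
		status.SetPhaseTX(apiv1.PhaseSwitchover, "Switching over to "+target),
		status.SetClusterReadyConditionTX,
	)
}
```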
From 70bf6aad6fed4ac078c9ba87cfe64b89b4b64d86 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Wed, 8 Jan 2025 16:57:11 +0100
Subject: [PATCH 298/836] chore: correct typos in the `resources/status` pkg
(#6543)
Signed-off-by: Armando Ruocco
---
pkg/resources/status/conditions.go | 2 +-
pkg/resources/status/{update.go => patch.go} | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
rename pkg/resources/status/{update.go => patch.go} (97%)
diff --git a/pkg/resources/status/conditions.go b/pkg/resources/status/conditions.go
index 54b09a056b..c8b814aded 100644
--- a/pkg/resources/status/conditions.go
+++ b/pkg/resources/status/conditions.go
@@ -74,7 +74,7 @@ func PatchConditionsWithOptimisticLock(
return nil
}); err != nil {
- return fmt.Errorf("while updating conditions: %w", err)
+ return fmt.Errorf("while patching conditions: %w", err)
}
return nil
diff --git a/pkg/resources/status/update.go b/pkg/resources/status/patch.go
similarity index 97%
rename from pkg/resources/status/update.go
rename to pkg/resources/status/patch.go
index 3f916cd0e1..3613a46f43 100644
--- a/pkg/resources/status/update.go
+++ b/pkg/resources/status/patch.go
@@ -73,7 +73,7 @@ func PatchWithOptimisticLock(
return nil
}); err != nil {
- return fmt.Errorf("while updating conditions: %w", err)
+ return fmt.Errorf("while patching status: %w", err)
}
if cluster.Status.Phase != apiv1.PhaseHealthy && origCluster.Status.Phase == apiv1.PhaseHealthy {
From 651b4cf749086b94d5781e8d9e2f73e68bbef0f7 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 9 Jan 2025 12:02:14 +0100
Subject: [PATCH 299/836] refactor: move webhooks into a dedicated package
 (#6472)
This refactoring improves code organization by moving all
webhook-related functionality into a dedicated package, aligning with
the `go/v4` Kubebuilder project layout.
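A sketch of the wiring this layout leads to is shown below; the defaulter and validator type names are illustrative assumptions, while the builder calls are the standard controller-runtime API for custom admission webhooks:

```go
package v1 // sketch of internal/webhook/v1; type names are illustrative

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// BackupCustomDefaulter applies defaults to Backup resources.
type BackupCustomDefaulter struct{}

// Default implements admission.CustomDefaulter.
func (d *BackupCustomDefaulter) Default(_ context.Context, _ runtime.Object) error {
	return nil // defaulting logic moves here instead of living on the API type
}

// BackupCustomValidator validates Backup resources.
type BackupCustomValidator struct{}

// ValidateCreate implements admission.CustomValidator.
func (v *BackupCustomValidator) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

// ValidateUpdate implements admission.CustomValidator.
func (v *BackupCustomValidator) ValidateUpdate(_ context.Context, _, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

// ValidateDelete implements admission.CustomValidator.
func (v *BackupCustomValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

// SetupBackupWebhookWithManager registers the Backup webhooks with the
// manager, keeping admission logic out of the api/v1 package.
func SetupBackupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&apiv1.Backup{}).
		WithDefaulter(&BackupCustomDefaulter{}).
		WithValidator(&BackupCustomValidator{}).
		Complete()
}
```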
Closes #6471
Signed-off-by: Marco Nenciarini
---
api/v1/backup_webhook.go | 120 -
api/v1/cluster_defaults.go | 263 ++
api/v1/cluster_defaults_test.go | 316 ++
api/v1/cluster_funcs.go | 9 +-
api/v1/cluster_funcs_test.go | 12 +-
api/v1/cluster_types.go | 8 +-
api/v1/pooler_webhook.go | 223 --
api/v1/scheduledbackup_funcs_test.go | 30 -
api/v1/scheduledbackup_webhook.go | 130 -
api/v1/scheduledbackup_webhook_test.go | 90 -
internal/cmd/manager/controller/controller.go | 11 +-
internal/webhook/v1/backup_webhook.go | 172 +
.../webhook}/v1/backup_webhook_test.go | 40 +-
.../webhook}/v1/cluster_webhook.go | 661 ++--
.../webhook}/v1/cluster_webhook_test.go | 3118 ++++++++---------
internal/webhook/v1/doc.go | 18 +
internal/webhook/v1/pooler_webhook.go | 254 ++
.../webhook}/v1/pooler_webhook_test.go | 83 +-
.../webhook/v1/scheduledbackup_webhook.go | 190 +
.../v1/scheduledbackup_webhook_test.go | 126 +
internal/webhook/v1/suite_test.go | 30 +
tests/e2e/asserts_test.go | 12 +-
tests/e2e/webhook_test.go | 7 +-
23 files changed, 3214 insertions(+), 2709 deletions(-)
delete mode 100644 api/v1/backup_webhook.go
create mode 100644 api/v1/cluster_defaults.go
create mode 100644 api/v1/cluster_defaults_test.go
delete mode 100644 api/v1/pooler_webhook.go
delete mode 100644 api/v1/scheduledbackup_webhook.go
delete mode 100644 api/v1/scheduledbackup_webhook_test.go
create mode 100644 internal/webhook/v1/backup_webhook.go
rename {api => internal/webhook}/v1/backup_webhook_test.go (66%)
rename {api => internal/webhook}/v1/cluster_webhook.go (77%)
rename {api => internal/webhook}/v1/cluster_webhook_test.go (55%)
create mode 100644 internal/webhook/v1/doc.go
create mode 100644 internal/webhook/v1/pooler_webhook.go
rename {api => internal/webhook}/v1/pooler_webhook_test.go (52%)
create mode 100644 internal/webhook/v1/scheduledbackup_webhook.go
create mode 100644 internal/webhook/v1/scheduledbackup_webhook_test.go
create mode 100644 internal/webhook/v1/suite_test.go
diff --git a/api/v1/backup_webhook.go b/api/v1/backup_webhook.go
deleted file mode 100644
index aec1fd54f9..0000000000
--- a/api/v1/backup_webhook.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
- "github.com/cloudnative-pg/machinery/pkg/log"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/validation/field"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/webhook"
- "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
-)
-
-// backupLog is for logging in this package.
-var backupLog = log.WithName("backup-resource").WithValues("version", "v1")
-
-// SetupWebhookWithManager setup the webhook inside the controller manager
-func (r *Backup) SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
- For(r).
- Complete()
-}
-
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-backup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,verbs=create;update,versions=v1,name=mbackup.cnpg.io,sideEffects=None
-
-var _ webhook.Defaulter = &Backup{}
-
-// Default implements webhook.Defaulter so a webhook will be registered for the type
-func (r *Backup) Default() {
- backupLog.Info("default", "name", r.Name, "namespace", r.Namespace)
-}
-
-// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-backup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,versions=v1,name=vbackup.cnpg.io,sideEffects=None
-
-var _ webhook.Validator = &Backup{}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *Backup) ValidateCreate() (admission.Warnings, error) {
- backupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
- allErrs := r.validate()
- if len(allErrs) == 0 {
- return nil, nil
- }
-
- return nil, apierrors.NewInvalid(
- schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"},
- r.Name, allErrs)
-}
-
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *Backup) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
- backupLog.Info("validate update", "name", r.Name, "namespace", r.Namespace)
- return r.ValidateCreate()
-}
-
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *Backup) ValidateDelete() (admission.Warnings, error) {
- backupLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace)
- return nil, nil
-}
-
-func (r *Backup) validate() field.ErrorList {
- var result field.ErrorList
-
- if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() {
- result = append(result, field.Invalid(
- field.NewPath("spec", "method"),
- r.Spec.Method,
- "Cannot use volumeSnapshot backup method due to missing "+
- "VolumeSnapshot CRD. If you installed the CRD after having "+
- "started the operator, please restart it to enable "+
- "VolumeSnapshot support",
- ))
- }
-
- if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.Online != nil {
- result = append(result, field.Invalid(
- field.NewPath("spec", "online"),
- r.Spec.Online,
- "Online parameter can be specified only if the backup method is volumeSnapshot",
- ))
- }
-
- if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil {
- result = append(result, field.Invalid(
- field.NewPath("spec", "onlineConfiguration"),
- r.Spec.OnlineConfiguration,
- "OnlineConfiguration parameter can be specified only if the backup method is volumeSnapshot",
- ))
- }
-
- if r.Spec.Method == BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() {
- result = append(result, field.Invalid(
- field.NewPath("spec", "pluginConfiguration"),
- r.Spec.OnlineConfiguration,
- "cannot be empty when the backup method is plugin",
- ))
- }
-
- return result
-}
diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go
new file mode 100644
index 0000000000..e368655cb8
--- /dev/null
+++ b/api/v1/cluster_defaults.go
@@ -0,0 +1,263 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
+ "k8s.io/utils/ptr"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+const (
+ // DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries
+ DefaultMonitoringKey = "queries"
+ // DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries,
+ // if configured
+ DefaultMonitoringConfigMapName = "cnpg-default-monitoring"
+ // DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries,
+ // if configured
+ DefaultMonitoringSecretName = DefaultMonitoringConfigMapName
+ // DefaultApplicationDatabaseName is the name of application database if not specified
+ DefaultApplicationDatabaseName = "app"
+ // DefaultApplicationUserName is the name of application database owner if not specified
+ DefaultApplicationUserName = DefaultApplicationDatabaseName
+)
+
+// Default apply the defaults to undefined values in a Cluster preserving the user settings
+func (r *Cluster) Default() {
+ r.setDefaults(true)
+}
+
+// SetDefaults apply the defaults to undefined values in a Cluster
+func (r *Cluster) SetDefaults() {
+ r.setDefaults(false)
+}
+
+func (r *Cluster) setDefaults(preserveUserSettings bool) {
+ // Defaulting the image name if not specified
+ if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil {
+ r.Spec.ImageName = configuration.Current.PostgresImageName
+ }
+
+ // Defaulting the bootstrap method if not specified
+ if r.Spec.Bootstrap == nil {
+ r.Spec.Bootstrap = &BootstrapConfiguration{}
+ }
+
+ // Defaulting initDB if no other bootstrap method was passed
+ switch {
+ case r.Spec.Bootstrap.Recovery != nil:
+ r.defaultRecovery()
+ case r.Spec.Bootstrap.PgBaseBackup != nil:
+ r.defaultPgBaseBackup()
+ default:
+ r.defaultInitDB()
+ }
+
+ // Defaulting the pod anti-affinity type if podAntiAffinity
+ if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) &&
+ r.Spec.Affinity.PodAntiAffinityType == "" {
+ r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred
+ }
+
+ if r.Spec.Backup != nil && r.Spec.Backup.Target == "" {
+ r.Spec.Backup.Target = DefaultBackupTarget
+ }
+
+ psqlVersion, err := r.GetPostgresqlVersion()
+ if err == nil {
+ // The validation error will be already raised by the
+ // validateImageName function
+ info := postgres.ConfigurationInfo{
+ Settings: postgres.CnpgConfigurationSettings,
+ Version: psqlVersion,
+ UserSettings: r.Spec.PostgresConfiguration.Parameters,
+ IsReplicaCluster: r.IsReplica(),
+ PreserveFixedSettingsFromUser: preserveUserSettings,
+ IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta),
+ IsAlterSystemEnabled: r.Spec.PostgresConfiguration.EnableAlterSystem,
+ }
+ sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters()
+ r.Spec.PostgresConfiguration.Parameters = sanitizedParameters
+ }
+
+ if r.Spec.LogLevel == "" {
+ r.Spec.LogLevel = log.InfoLevelString
+ }
+
+ // we inject the defaultMonitoringQueries if the MonitoringQueriesConfigmap parameter is not empty
+ // and defaultQueries not disabled on cluster crd
+ if !r.Spec.Monitoring.AreDefaultQueriesDisabled() {
+ r.defaultMonitoringQueries(configuration.Current)
+ }
+
+ // If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots
+ if r.Spec.ReplicationSlots == nil {
+ r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{}
+ }
+ if r.Spec.ReplicationSlots.HighAvailability == nil {
+ r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{
+ Enabled: ptr.To(true),
+ SlotPrefix: "_cnpg_",
+ }
+ }
+ if r.Spec.ReplicationSlots.SynchronizeReplicas == nil {
+ r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{
+ Enabled: ptr.To(true),
+ }
+ }
+
+ if len(r.Spec.Tablespaces) > 0 {
+ r.defaultTablespaces()
+ }
+
+ r.setDefaultPlugins(configuration.Current)
+}
+
+func (r *Cluster) setDefaultPlugins(config *configuration.Data) {
+ // Add the list of pre-defined plugins
+ foundPlugins := stringset.New()
+ for _, plugin := range r.Spec.Plugins {
+ foundPlugins.Put(plugin.Name)
+ }
+
+ for _, pluginName := range config.GetIncludePlugins() {
+ if !foundPlugins.Has(pluginName) {
+ r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{
+ Name: pluginName,
+ Enabled: ptr.To(true),
+ })
+ }
+ }
+}
+
+// defaultTablespaces adds the tablespace owner where the
+// user didn't specify it
+func (r *Cluster) defaultTablespaces() {
+ defaultOwner := r.GetApplicationDatabaseOwner()
+ if len(defaultOwner) == 0 {
+ defaultOwner = "postgres"
+ }
+
+ for name, tablespaceConfiguration := range r.Spec.Tablespaces {
+ if len(tablespaceConfiguration.Owner.Name) == 0 {
+ tablespaceConfiguration.Owner.Name = defaultOwner
+ }
+ r.Spec.Tablespaces[name] = tablespaceConfiguration
+ }
+}
+
+// defaultMonitoringQueries adds the default monitoring queries configMap
+// if not already present in CustomQueriesConfigMap
+func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) {
+ if r.Spec.Monitoring == nil {
+ r.Spec.Monitoring = &MonitoringConfiguration{}
+ }
+
+ if config.MonitoringQueriesConfigmap != "" {
+ var defaultConfigMapQueriesAlreadyPresent bool
+ // We check if the default queries are already inserted in the monitoring configuration
+ for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap {
+ if monitoringConfigMap.Name == DefaultMonitoringConfigMapName {
+ defaultConfigMapQueriesAlreadyPresent = true
+ break
+ }
+ }
+
+ // If the default queries are already present there is no need to re-add them.
+ // Please note that, in this case, the default configMap could overwrite existing user queries
+ // depending on the order. This is accepted behavior because the user willingly defined the order of the array
+ if !defaultConfigMapQueriesAlreadyPresent {
+ r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{
+ {
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
+ Key: DefaultMonitoringKey,
+ },
+ }, r.Spec.Monitoring.CustomQueriesConfigMap...)
+ }
+ }
+
+ if config.MonitoringQueriesSecret != "" {
+ var defaultSecretQueriesAlreadyPresent bool
+ // we check if the default queries are already inserted in the monitoring configuration
+ for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret {
+ if monitoringSecret.Name == DefaultMonitoringSecretName {
+ defaultSecretQueriesAlreadyPresent = true
+ break
+ }
+ }
+
+ if !defaultSecretQueriesAlreadyPresent {
+ r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{
+ {
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
+ Key: DefaultMonitoringKey,
+ },
+ }, r.Spec.Monitoring.CustomQueriesSecret...)
+ }
+ }
+}
+
+// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed
+func (r *Cluster) defaultInitDB() {
+ if r.Spec.Bootstrap.InitDB == nil {
+ r.Spec.Bootstrap.InitDB = &BootstrapInitDB{
+ Database: DefaultApplicationDatabaseName,
+ Owner: DefaultApplicationUserName,
+ }
+ }
+
+ if r.Spec.Bootstrap.InitDB.Database == "" {
+ r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName
+ }
+ if r.Spec.Bootstrap.InitDB.Owner == "" {
+ r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database
+ }
+ if r.Spec.Bootstrap.InitDB.Encoding == "" {
+ r.Spec.Bootstrap.InitDB.Encoding = "UTF8"
+ }
+ if r.Spec.Bootstrap.InitDB.LocaleCollate == "" {
+ r.Spec.Bootstrap.InitDB.LocaleCollate = "C"
+ }
+ if r.Spec.Bootstrap.InitDB.LocaleCType == "" {
+ r.Spec.Bootstrap.InitDB.LocaleCType = "C"
+ }
+}
+
+// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed
+func (r *Cluster) defaultRecovery() {
+ if r.Spec.Bootstrap.Recovery.Database == "" {
+ r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName
+ }
+ if r.Spec.Bootstrap.Recovery.Owner == "" {
+ r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database
+ }
+}
+
+// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed
+func (r *Cluster) defaultPgBaseBackup() {
+ if r.Spec.Bootstrap.PgBaseBackup.Database == "" {
+ r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName
+ }
+ if r.Spec.Bootstrap.PgBaseBackup.Owner == "" {
+ r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database
+ }
+}
diff --git a/api/v1/cluster_defaults_test.go b/api/v1/cluster_defaults_test.go
new file mode 100644
index 0000000000..4ff95bb8ca
--- /dev/null
+++ b/api/v1/cluster_defaults_test.go
@@ -0,0 +1,316 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/utils/ptr"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("cluster default configuration", func() {
+ It("defaults to creating an application database", func() {
+ cluster := Cluster{}
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app"))
+ Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app"))
+ })
+
+ It("defaults the owner user with the database name", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ InitDB: &BootstrapInitDB{
+ Database: "appdb",
+ },
+ },
+ },
+ }
+
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb"))
+ })
+
+ It("defaults to create an application database if recovery is used", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ Recovery: &BootstrapRecovery{},
+ },
+ },
+ }
+ cluster.Default()
+ Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue())
+ Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty())
+ Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty())
+ Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil())
+ })
+
+ It("defaults the owner user with the database name for recovery", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ Recovery: &BootstrapRecovery{
+ Database: "appdb",
+ },
+ },
+ },
+ }
+
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb"))
+ })
+
+ It("defaults to create an application database if pg_basebackup is used", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ PgBaseBackup: &BootstrapPgBaseBackup{},
+ },
+ },
+ }
+ cluster.Default()
+ Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue())
+ Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty())
+ Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty())
+ Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil())
+ })
+
+ It("defaults the owner user with the database name for pg_basebackup", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ PgBaseBackup: &BootstrapPgBaseBackup{
+ Database: "appdb",
+ },
+ },
+ },
+ }
+
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb"))
+ })
+
+ It("defaults the PostgreSQL configuration with parameters from the operator", func() {
+ cluster := Cluster{}
+ cluster.Default()
+ Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty())
+ })
+
+ It("defaults the anti-affinity", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Affinity: AffinityConfiguration{},
+ },
+ }
+ cluster.Default()
+ Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred))
+ Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil())
+ })
+
+ It("should fill the image name if isn't already set", func() {
+ cluster := Cluster{}
+ cluster.Default()
+ Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName))
+ })
+
+ It("shouldn't set the image name if already present", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ ImageName: "test:13",
+ },
+ }
+ cluster.Default()
+ Expect(cluster.Spec.ImageName).To(Equal("test:13"))
+ })
+
+ It("should setup the application database name", func() {
+ cluster := Cluster{}
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app"))
+ Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app"))
+ })
+
+ It("should set the owner name as the database name", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ InitDB: &BootstrapInitDB{
+ Database: "test",
+ },
+ },
+ },
+ }
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test"))
+ Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test"))
+ })
+
+ It("should not overwrite application database and owner settings", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Bootstrap: &BootstrapConfiguration{
+ InitDB: &BootstrapInitDB{
+ Database: "testdb",
+ Owner: "testuser",
+ },
+ },
+ },
+ }
+ cluster.Default()
+ Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb"))
+ Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser"))
+ })
+})
+
+var _ = Describe("Default monitoring queries", func() {
+ It("correctly set the default monitoring queries configmap and secret when none is already specified", func() {
+ cluster := &Cluster{}
+ cluster.defaultMonitoringQueries(&configuration.Data{
+ MonitoringQueriesSecret: "test-secret",
+ MonitoringQueriesConfigmap: "test-configmap",
+ })
+ Expect(cluster.Spec.Monitoring).NotTo(BeNil())
+ Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
+ Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).
+ To(ContainElement(ConfigMapKeySelector{
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
+ Key: DefaultMonitoringKey,
+ }))
+ Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
+ Expect(cluster.Spec.Monitoring.CustomQueriesSecret).
+ To(ContainElement(SecretKeySelector{
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
+ Key: DefaultMonitoringKey,
+ }))
+ })
+ testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{
+ CustomQueriesConfigMap: []ConfigMapKeySelector{
+ {
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
+ Key: "test2",
+ },
+ },
+ CustomQueriesSecret: []SecretKeySelector{
+ {
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
+ Key: "test3",
+ },
+ },
+ }}}
+ It("correctly set the default monitoring queries configmap when other metrics are already specified", func() {
+ modifiedCluster := testCluster.DeepCopy()
+ modifiedCluster.defaultMonitoringQueries(&configuration.Data{
+ MonitoringQueriesConfigmap: "test-configmap",
+ })
+
+ Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
+ To(ContainElement(ConfigMapKeySelector{
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
+ Key: "test2",
+ }))
+
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
+ To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret))
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
+ To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
+ })
+ It("correctly set the default monitoring queries secret when other metrics are already specified", func() {
+ modifiedCluster := testCluster.DeepCopy()
+ modifiedCluster.defaultMonitoringQueries(&configuration.Data{
+ MonitoringQueriesSecret: "test-secret",
+ })
+
+ Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
+ To(ContainElement(SecretKeySelector{
+ LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
+ Key: "test3",
+ }))
+
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
+ To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
+ Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
+ To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret))
+ })
+})
+
+var _ = Describe("setDefaultPlugins", func() {
+ It("adds pre-defined plugins if not already present", func() {
+ cluster := &Cluster{
+ Spec: ClusterSpec{
+ Plugins: []PluginConfiguration{
+ {Name: "existing-plugin", Enabled: ptr.To(true)},
+ },
+ },
+ }
+ config := &configuration.Data{
+ IncludePlugins: "predefined-plugin1,predefined-plugin2",
+ }
+
+ cluster.setDefaultPlugins(config)
+
+ Expect(cluster.Spec.Plugins).To(
+ ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)}))
+ Expect(cluster.Spec.Plugins).To(
+ ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
+ Expect(cluster.Spec.Plugins).To(
+ ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
+ })
+
+ It("does not add pre-defined plugins if already present", func() {
+ cluster := &Cluster{
+ Spec: ClusterSpec{
+ Plugins: []PluginConfiguration{
+ {Name: "predefined-plugin1", Enabled: ptr.To(false)},
+ },
+ },
+ }
+ config := &configuration.Data{
+ IncludePlugins: "predefined-plugin1,predefined-plugin2",
+ }
+
+ cluster.setDefaultPlugins(config)
+
+ Expect(cluster.Spec.Plugins).To(HaveLen(2))
+ Expect(cluster.Spec.Plugins).To(
+ ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)}))
+ Expect(cluster.Spec.Plugins).To(
+ ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
+ })
+
+ It("handles empty plugin list gracefully", func() {
+ cluster := &Cluster{}
+ config := &configuration.Data{
+ IncludePlugins: "predefined-plugin1",
+ }
+
+ cluster.setDefaultPlugins(config)
+
+ Expect(cluster.Spec.Plugins).To(HaveLen(1))
+ Expect(cluster.Spec.Plugins).To(
+ ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
+ })
+})
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index a116aee695..132656c4db 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -182,6 +182,11 @@ func (r *SynchronizeReplicasConfiguration) GetEnabled() bool {
return true
}
+// ValidateRegex returns all the errors that happened during the regex compilation
+func (r *SynchronizeReplicasConfiguration) ValidateRegex() []error {
+ return r.compileRegex()
+}
+
// IsExcludedByUser returns if a replication slot should not be reconciled on the replicas
func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bool, error) {
if r == nil {
@@ -940,7 +945,7 @@ func (cluster *Cluster) ContainsTablespaces() bool {
// user
func (cluster Cluster) GetPostgresUID() int64 {
if cluster.Spec.PostgresUID == 0 {
- return defaultPostgresUID
+ return DefaultPostgresUID
}
return cluster.Spec.PostgresUID
}
@@ -949,7 +954,7 @@ func (cluster Cluster) GetPostgresUID() int64 {
// user
func (cluster Cluster) GetPostgresGID() int64 {
if cluster.Spec.PostgresGID == 0 {
- return defaultPostgresGID
+ return DefaultPostgresGID
}
return cluster.Spec.PostgresGID
}
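
These two changes serve the webhook relocation later in this patch: once validation moves out of api/v1, it can no longer reach the unexported compileRegex or defaultPostgresUID/defaultPostgresGID, so api/v1 exposes ValidateRegex and the exported constants instead. A hedged sketch of how the relocated validator might consume ValidateRegex (the field path and reported value are assumptions, not code from this patch):

    import (
    	"k8s.io/apimachinery/pkg/util/validation/field"

    	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    )

    func validateSynchronizeReplicas(r *apiv1.Cluster) field.ErrorList {
    	if r.Spec.ReplicationSlots == nil || r.Spec.ReplicationSlots.SynchronizeReplicas == nil {
    		return nil
    	}
    	cfg := r.Spec.ReplicationSlots.SynchronizeReplicas
    	var errs field.ErrorList
    	// Each compilation failure becomes a field error on the cluster spec.
    	for _, err := range cfg.ValidateRegex() {
    		errs = append(errs, field.Invalid(
    			field.NewPath("spec", "replicationSlots", "synchronizeReplicas"),
    			cfg, err.Error()))
    	}
    	return errs
    }
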
diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go
index 34c67f363f..d0126362f5 100644
--- a/api/v1/cluster_funcs_test.go
+++ b/api/v1/cluster_funcs_test.go
@@ -1159,10 +1159,10 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() {
synchronizeReplicas = &SynchronizeReplicasConfiguration{}
})
- Context("compileRegex", func() {
+ Context("CompileRegex", func() {
It("should return no errors when SynchronizeReplicasConfiguration is nil", func() {
synchronizeReplicas = nil
- Expect(synchronizeReplicas.compileRegex()).To(BeEmpty())
+ Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty())
})
Context("when SynchronizeReplicasConfiguration is not nil", func() {
@@ -1171,7 +1171,7 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() {
})
It("should compile patterns without errors", func() {
- Expect(synchronizeReplicas.compileRegex()).To(BeEmpty())
+ Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty())
})
Context("when a pattern fails to compile", func() {
@@ -1180,15 +1180,15 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() {
})
It("should return errors for the invalid pattern", func() {
- errors := synchronizeReplicas.compileRegex()
+ errors := synchronizeReplicas.ValidateRegex()
Expect(errors).To(HaveLen(1))
})
})
})
It("should return no errors on subsequent calls when compile is called multiple times", func() {
- Expect(synchronizeReplicas.compileRegex()).To(BeEmpty())
- Expect(synchronizeReplicas.compileRegex()).To(BeEmpty())
+ Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty())
+ Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty())
})
})
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index 57dae17184..cc14768848 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -87,11 +87,11 @@ const (
// streaming replication purposes
StreamingReplicationUser = "streaming_replica"
- // defaultPostgresUID is the default UID which is used by PostgreSQL
- defaultPostgresUID = 26
+ // DefaultPostgresUID is the default UID which is used by PostgreSQL
+ DefaultPostgresUID = 26
- // defaultPostgresGID is the default GID which is used by PostgreSQL
- defaultPostgresGID = 26
+ // DefaultPostgresGID is the default GID which is used by PostgreSQL
+ DefaultPostgresGID = 26
// PodAntiAffinityTypeRequired is the label for required anti-affinity type
PodAntiAffinityTypeRequired = "required"
diff --git a/api/v1/pooler_webhook.go b/api/v1/pooler_webhook.go
deleted file mode 100644
index b86ac2622b..0000000000
--- a/api/v1/pooler_webhook.go
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
- "fmt"
-
- "github.com/cloudnative-pg/machinery/pkg/log"
- "github.com/cloudnative-pg/machinery/pkg/stringset"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/validation/field"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/webhook"
- "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
-)
-
-var (
- // poolerLog is for logging in this package.
- poolerLog = log.WithName("pooler-resource").WithValues("version", "v1")
-
- // AllowedPgbouncerGenericConfigurationParameters is the list of allowed parameters for PgBouncer
- AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{
- "application_name_add_host",
- "autodb_idle_timeout",
- "cancel_wait_timeout",
- "client_idle_timeout",
- "client_login_timeout",
- "default_pool_size",
- "disable_pqexec",
- "dns_max_ttl",
- "dns_nxdomain_ttl",
- "idle_transaction_timeout",
- "ignore_startup_parameters",
- "listen_backlog",
- "log_connections",
- "log_disconnections",
- "log_pooler_errors",
- "log_stats",
- "max_client_conn",
- "max_db_connections",
- "max_packet_size",
- "max_prepared_statements",
- "max_user_connections",
- "min_pool_size",
- "pkt_buf",
- "query_timeout",
- "query_wait_timeout",
- "reserve_pool_size",
- "reserve_pool_timeout",
- "sbuf_loopcnt",
- "server_check_delay",
- "server_check_query",
- "server_connect_timeout",
- "server_fast_close",
- "server_idle_timeout",
- "server_lifetime",
- "server_login_retry",
- "server_reset_query",
- "server_reset_query_always",
- "server_round_robin",
- "server_tls_ciphers",
- "server_tls_protocols",
- "stats_period",
- "suspend_timeout",
- "tcp_defer_accept",
- "tcp_socket_buffer",
- "tcp_keepalive",
- "tcp_keepcnt",
- "tcp_keepidle",
- "tcp_keepintvl",
- "tcp_user_timeout",
- "track_extra_parameters",
- "verbose",
- })
-)
-
-// SetupWebhookWithManager setup the webhook inside the controller manager
-func (r *Pooler) SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
- For(r).
- Complete()
-}
-
-// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-pooler,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=poolers,versions=v1,name=vpooler.cnpg.io,sideEffects=None
-
-var _ webhook.Validator = &Pooler{}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *Pooler) ValidateCreate() (warns admission.Warnings, err error) {
- poolerLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
-
- if !r.IsAutomatedIntegration() {
- poolerLog.Info("Pooler not automatically configured, manual configuration required",
- "name", r.Name, "namespace", r.Namespace, "cluster", r.Spec.Cluster.Name)
- warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+
- "Manually configure it as described in the docs.", r.Name, r.Spec.Cluster.Name, r.Namespace))
- }
-
- allErrs := r.Validate()
-
- if len(allErrs) == 0 {
- return warns, nil
- }
-
- return nil, apierrors.NewInvalid(
- schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"},
- r.Name, allErrs)
-}
-
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *Pooler) ValidateUpdate(old runtime.Object) (warns admission.Warnings, err error) {
- poolerLog.Info("validate update", "name", r.Name, "namespace", r.Namespace)
-
- oldPooler := old.(*Pooler)
-
- if oldPooler.IsAutomatedIntegration() && !r.IsAutomatedIntegration() {
- poolerLog.Info("Pooler not automatically configured, manual configuration required",
- "name", r.Name, "namespace", r.Namespace, "cluster", r.Spec.Cluster.Name)
- warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+
- "Manually configure it as described in the docs.", r.Name, r.Spec.Cluster.Name, r.Namespace))
- }
-
- allErrs := r.Validate()
- if len(allErrs) == 0 {
- return nil, nil
- }
-
- return warns, apierrors.NewInvalid(
- schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"},
- r.Name, allErrs)
-}
-
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *Pooler) ValidateDelete() (admission.Warnings, error) {
- poolerLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace)
- return nil, nil
-}
-
-func (r *Pooler) validatePgBouncer() field.ErrorList {
- var result field.ErrorList
- switch {
- case r.Spec.PgBouncer == nil:
- result = append(result,
- field.Invalid(
- field.NewPath("spec", "pgbouncer"),
- "", "required pgbouncer configuration"))
- case r.Spec.PgBouncer.AuthQuerySecret != nil && r.Spec.PgBouncer.AuthQuerySecret.Name != "" &&
- r.Spec.PgBouncer.AuthQuery == "":
- result = append(result,
- field.Invalid(
- field.NewPath("spec", "pgbouncer", "authQuery"),
- "", "must specify an auth query when providing an auth query secret"))
- case (r.Spec.PgBouncer.AuthQuerySecret == nil || r.Spec.PgBouncer.AuthQuerySecret.Name == "") &&
- r.Spec.PgBouncer.AuthQuery != "":
- result = append(result,
- field.Invalid(
- field.NewPath("spec", "pgbouncer", "authQuerySecret", "name"),
- "", "must specify an existing auth query secret when providing an auth query secret"))
- }
-
- if r.Spec.PgBouncer != nil && len(r.Spec.PgBouncer.Parameters) > 0 {
- result = append(result, r.validatePgbouncerGenericParameters()...)
- }
-
- return result
-}
-
-func (r *Pooler) validateCluster() field.ErrorList {
- var result field.ErrorList
- if r.Spec.Cluster.Name == "" {
- result = append(result,
- field.Invalid(
- field.NewPath("spec", "cluster", "name"),
- "", "must specify a cluster name"))
- }
- if r.Spec.Cluster.Name == r.Name {
- result = append(result,
- field.Invalid(
- field.NewPath("metadata", "name"),
- r.Name, "the pooler resource cannot have the same name of a cluster"))
- }
- return result
-}
-
-// Validate validates the configuration of a Pooler, returning
-// a list of errors
-func (r *Pooler) Validate() (allErrs field.ErrorList) {
- allErrs = append(allErrs, r.validatePgBouncer()...)
- allErrs = append(allErrs, r.validateCluster()...)
- return allErrs
-}
-
-// validatePgbouncerGenericParameters validates pgbouncer parameters
-func (r *Pooler) validatePgbouncerGenericParameters() field.ErrorList {
- var result field.ErrorList
-
- for param := range r.Spec.PgBouncer.Parameters {
- if !AllowedPgbouncerGenericConfigurationParameters.Has(param) {
- result = append(result,
- field.Invalid(
- field.NewPath("spec", "cluster", "parameters"),
- param, "Invalid or reserved parameter"))
- }
- }
- return result
-}
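
The deleted file is not lost functionality: the same checks move to internal/webhook/v1, rebuilt on controller-runtime's webhook.CustomValidator instead of the deprecated webhook.Validator. The difference in shape, sketched with assumed names (the relocated Pooler webhook itself is not shown in this excerpt, but the Backup and Cluster files below follow exactly this pattern):

    // Old shape (deleted above): the API type validates itself.
    //   var _ webhook.Validator = &Pooler{}
    //   func (r *Pooler) ValidateCreate() (admission.Warnings, error)

    // New shape: a dedicated validator receives a context plus the object
    // as a runtime.Object and must type-assert it.
    import (
    	"context"
    	"fmt"

    	"k8s.io/apimachinery/pkg/runtime"
    	"sigs.k8s.io/controller-runtime/pkg/webhook"
    	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

    	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    )

    type PoolerCustomValidator struct{}

    var _ webhook.CustomValidator = &PoolerCustomValidator{}

    func (v *PoolerCustomValidator) ValidateCreate(
    	_ context.Context, obj runtime.Object,
    ) (admission.Warnings, error) {
    	if _, ok := obj.(*apiv1.Pooler); !ok {
    		return nil, fmt.Errorf("expected a Pooler object but got %T", obj)
    	}
    	// ...the pgbouncer and cluster checks from the deleted Validate() go here.
    	return nil, nil
    }
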
diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go
index d4da0915ea..150192d8d4 100644
--- a/api/v1/scheduledbackup_funcs_test.go
+++ b/api/v1/scheduledbackup_funcs_test.go
@@ -17,8 +17,6 @@ limitations under the License.
package v1
import (
- "k8s.io/utils/ptr"
-
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
. "github.com/onsi/ginkgo/v2"
@@ -68,32 +66,4 @@ var _ = Describe("Scheduled backup", func() {
Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName))
Expect(backup.Spec.Target).To(BeEquivalentTo(BackupTargetPrimary))
})
-
- It("complains if online is set on a barman backup", func() {
- scheduledBackup := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Method: BackupMethodBarmanObjectStore,
- Online: ptr.To(true),
- Schedule: "* * * * * *",
- },
- }
- warnings, result := scheduledBackup.validate()
- Expect(warnings).To(BeEmpty())
- Expect(result).To(HaveLen(1))
- Expect(result[0].Field).To(Equal("spec.online"))
- })
-
- It("complains if onlineConfiguration is set on a barman backup", func() {
- scheduledBackup := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Method: BackupMethodBarmanObjectStore,
- OnlineConfiguration: &OnlineConfiguration{},
- Schedule: "* * * * * *",
- },
- }
- warnings, result := scheduledBackup.validate()
- Expect(warnings).To(BeEmpty())
- Expect(result).To(HaveLen(1))
- Expect(result[0].Field).To(Equal("spec.onlineConfiguration"))
- })
})
diff --git a/api/v1/scheduledbackup_webhook.go b/api/v1/scheduledbackup_webhook.go
deleted file mode 100644
index 30be039614..0000000000
--- a/api/v1/scheduledbackup_webhook.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
- "strings"
-
- "github.com/cloudnative-pg/machinery/pkg/log"
- "github.com/robfig/cron"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/validation/field"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/webhook"
- "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
-
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
-)
-
-// scheduledBackupLog is for logging in this package.
-var scheduledBackupLog = log.WithName("scheduledbackup-resource").WithValues("version", "v1")
-
-// SetupWebhookWithManager setup the webhook inside the controller manager
-func (r *ScheduledBackup) SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
- For(r).
- Complete()
-}
-
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-scheduledbackup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,verbs=create;update,versions=v1,name=mscheduledbackup.cnpg.io,sideEffects=None
-
-var _ webhook.Defaulter = &ScheduledBackup{}
-
-// Default implements webhook.Defaulter so a webhook will be registered for the type
-func (r *ScheduledBackup) Default() {
- scheduledBackupLog.Info("default", "name", r.Name, "namespace", r.Namespace)
-}
-
-// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-scheduledbackup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,versions=v1,name=vscheduledbackup.cnpg.io,sideEffects=None
-
-var _ webhook.Validator = &ScheduledBackup{}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *ScheduledBackup) ValidateCreate() (admission.Warnings, error) {
- scheduledBackupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
-
- warnings, allErrs := r.validate()
- if len(allErrs) == 0 {
- return warnings, nil
- }
-
- return nil, apierrors.NewInvalid(
- schema.GroupKind{Group: "scheduledbackup.cnpg.io", Kind: "Backup"},
- r.Name, allErrs)
-}
-
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *ScheduledBackup) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
- scheduledBackupLog.Info("validate update", "name", r.Name, "namespace", r.Namespace)
- return nil, nil
-}
-
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *ScheduledBackup) ValidateDelete() (admission.Warnings, error) {
- scheduledBackupLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace)
- return nil, nil
-}
-
-func (r *ScheduledBackup) validate() (admission.Warnings, field.ErrorList) {
- var result field.ErrorList
- var warnings admission.Warnings
-
- if _, err := cron.Parse(r.GetSchedule()); err != nil {
- result = append(result,
- field.Invalid(
- field.NewPath("spec", "schedule"),
- r.Spec.Schedule, err.Error()))
- } else if len(strings.Fields(r.Spec.Schedule)) != 6 {
- warnings = append(
- warnings,
- "Schedule parameter may not have the right number of arguments "+
- "(usually six arguments are needed)",
- )
- }
-
- if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() {
- result = append(result, field.Invalid(
- field.NewPath("spec", "method"),
- r.Spec.Method,
- "Cannot use volumeSnapshot backup method due to missing "+
- "VolumeSnapshot CRD. If you installed the CRD after having "+
- "started the operator, please restart it to enable "+
- "VolumeSnapshot support",
- ))
- }
-
- if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.Online != nil {
- result = append(result, field.Invalid(
- field.NewPath("spec", "online"),
- r.Spec.Online,
- "Online parameter can be specified only if the method is volumeSnapshot",
- ))
- }
-
- if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil {
- result = append(result, field.Invalid(
- field.NewPath("spec", "onlineConfiguration"),
- r.Spec.OnlineConfiguration,
- "OnlineConfiguration parameter can be specified only if the method is volumeSnapshot",
- ))
- }
-
- return warnings, result
-}
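
For reference, the schedule check this file performed (and which its replacement in internal/webhook/v1 presumably retains) leans on robfig/cron's six-field, seconds-first format: cron.Parse rejects malformed expressions outright, while a parseable schedule with an unexpected field count only produces a warning. A runnable sketch of that two-step check:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/robfig/cron"
    )

    func checkSchedule(schedule string) {
    	if _, err := cron.Parse(schedule); err != nil {
    		fmt.Println("error:", err) // hard validation failure
    		return
    	}
    	if len(strings.Fields(schedule)) != 6 {
    		fmt.Println("warning: usually six fields are needed") // soft warning
    		return
    	}
    	fmt.Println("ok")
    }

    func main() {
    	checkSchedule("0 0 0 * * *")    // ok: seconds minutes hours dom month dow
    	checkSchedule("1 2 3 4 5")      // parses, but warns: five fields
    	checkSchedule("0 0 0 * * * 96") // error: too many fields
    }
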
diff --git a/api/v1/scheduledbackup_webhook_test.go b/api/v1/scheduledbackup_webhook_test.go
deleted file mode 100644
index 0ef5043a97..0000000000
--- a/api/v1/scheduledbackup_webhook_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Validate schedule", func() {
- It("doesn't complain if there's a schedule", func() {
- schedule := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Schedule: "0 0 0 * * *",
- },
- }
-
- warnings, result := schedule.validate()
- Expect(warnings).To(BeEmpty())
- Expect(result).To(BeEmpty())
- })
-
- It("warn the user if the schedule has a wrong number of arguments", func() {
- schedule := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Schedule: "1 2 3 4 5",
- },
- }
-
- warnings, result := schedule.validate()
- Expect(warnings).To(HaveLen(1))
- Expect(result).To(BeEmpty())
- })
-
- It("complain with a wrong time", func() {
- schedule := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Schedule: "0 0 0 * * * 1996",
- },
- }
-
- warnings, result := schedule.validate()
- Expect(warnings).To(BeEmpty())
- Expect(result).To(HaveLen(1))
- })
-
- It("doesn't complain if VolumeSnapshot CRD is present", func() {
- schedule := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Schedule: "0 0 0 * * *",
- Method: BackupMethodVolumeSnapshot,
- },
- }
- utils.SetVolumeSnapshot(true)
-
- warnings, result := schedule.validate()
- Expect(warnings).To(BeEmpty())
- Expect(result).To(BeEmpty())
- })
-
- It("complains if VolumeSnapshot CRD is not present", func() {
- schedule := &ScheduledBackup{
- Spec: ScheduledBackupSpec{
- Schedule: "0 0 0 * * *",
- Method: BackupMethodVolumeSnapshot,
- },
- }
- utils.SetVolumeSnapshot(false)
- warnings, result := schedule.validate()
- Expect(warnings).To(BeEmpty())
- Expect(result).To(HaveLen(1))
- Expect(result[0].Field).To(Equal("spec.method"))
- })
-})
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index 84b452c908..01ccec5111 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -32,12 +32,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
- // +kubebuilder:scaffold:imports
- apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/internal/controller"
schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+ webhookv1 "github.com/cloudnative-pg/cloudnative-pg/internal/webhook/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/multicache"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -262,22 +261,22 @@ func RunController(
return err
}
- if err = (&apiv1.Cluster{}).SetupWebhookWithManager(mgr); err != nil {
+ if err = webhookv1.SetupClusterWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Cluster", "version", "v1")
return err
}
- if err = (&apiv1.Backup{}).SetupWebhookWithManager(mgr); err != nil {
+ if err = webhookv1.SetupBackupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Backup", "version", "v1")
return err
}
- if err = (&apiv1.ScheduledBackup{}).SetupWebhookWithManager(mgr); err != nil {
+ if err = webhookv1.SetupScheduledBackupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "ScheduledBackup", "version", "v1")
return err
}
- if err = (&apiv1.Pooler{}).SetupWebhookWithManager(mgr); err != nil {
+ if err = webhookv1.SetupPoolerWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Pooler", "version", "v1")
return err
}
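
Wiring each webhook through a package-level function rather than a method on the API type leaves RunController with four near-identical blocks. A hypothetical table-driven alternative (not part of this patch), assuming the four Setup* functions it calls above:

    webhookSetups := map[string]func(ctrl.Manager) error{
    	"Cluster":         webhookv1.SetupClusterWebhookWithManager,
    	"Backup":          webhookv1.SetupBackupWebhookWithManager,
    	"ScheduledBackup": webhookv1.SetupScheduledBackupWebhookWithManager,
    	"Pooler":          webhookv1.SetupPoolerWebhookWithManager,
    }
    for name, setup := range webhookSetups {
    	if err := setup(mgr); err != nil {
    		setupLog.Error(err, "unable to create webhook", "webhook", name, "version", "v1")
    		return err
    	}
    }
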
diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go
new file mode 100644
index 0000000000..87ed87d1f6
--- /dev/null
+++ b/internal/webhook/v1/backup_webhook.go
@@ -0,0 +1,172 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// backupLog is for logging in this package.
+var backupLog = log.WithName("backup-resource").WithValues("version", "v1")
+
+// SetupBackupWebhookWithManager registers the webhook for Backup in the manager.
+func SetupBackupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Backup{}).
+ WithValidator(&BackupCustomValidator{}).
+ WithDefaulter(&BackupCustomDefaulter{}).
+ Complete()
+}
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Modifying it to an invalid path can cause API server errors when it fails to locate the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-backup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,verbs=create;update,versions=v1,name=mbackup.cnpg.io,sideEffects=None
+
+// BackupCustomDefaulter struct is responsible for setting default values on the custom resource of the
+// Kind Backup when those are created or updated.
+type BackupCustomDefaulter struct{}
+
+var _ webhook.CustomDefaulter = &BackupCustomDefaulter{}
+
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Backup.
+func (d *BackupCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
+ backup, ok := obj.(*apiv1.Backup)
+ if !ok {
+ return fmt.Errorf("expected an Backup object but got %T", obj)
+ }
+ backupLog.Info("Defaulting for Backup", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+ // TODO(user): fill in your defaulting logic.
+
+ return nil
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Modifying it to an invalid path can cause API server errors when it fails to locate the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-backup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,versions=v1,name=vbackup.cnpg.io,sideEffects=None
+
+// BackupCustomValidator struct is responsible for validating the Backup resource
+// when it is created, updated, or deleted.
+type BackupCustomValidator struct{}
+
+var _ webhook.CustomValidator = &BackupCustomValidator{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ backup, ok := obj.(*apiv1.Backup)
+ if !ok {
+ return nil, fmt.Errorf("expected a Backup object but got %T", obj)
+ }
+ backupLog.Info("Validation for Backup upon creation", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+ allErrs := v.validate(backup)
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"},
+ backup.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateUpdate(
+ _ context.Context,
+ _, newObj runtime.Object,
+) (admission.Warnings, error) {
+ backup, ok := newObj.(*apiv1.Backup)
+ if !ok {
+ return nil, fmt.Errorf("expected a Backup object for the newObj but got %T", newObj)
+ }
+ backupLog.Info("Validation for Backup upon update", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+ allErrs := v.validate(backup)
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "backup.cnpg.io", Kind: "Backup"},
+ backup.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ backup, ok := obj.(*apiv1.Backup)
+ if !ok {
+ return nil, fmt.Errorf("expected a Backup object but got %T", obj)
+ }
+ backupLog.Info("Validation for Backup upon deletion", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+ // TODO(user): fill in your validation logic upon object deletion.
+
+ return nil, nil
+}
+
+func (v *BackupCustomValidator) validate(r *apiv1.Backup) field.ErrorList {
+ var result field.ErrorList
+
+ if r.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "method"),
+ r.Spec.Method,
+ "Cannot use volumeSnapshot backup method due to missing "+
+ "VolumeSnapshot CRD. If you installed the CRD after having "+
+ "started the operator, please restart it to enable "+
+ "VolumeSnapshot support",
+ ))
+ }
+
+ if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.Online != nil {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "online"),
+ r.Spec.Online,
+ "Online parameter can be specified only if the backup method is volumeSnapshot",
+ ))
+ }
+
+ if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "onlineConfiguration"),
+ r.Spec.OnlineConfiguration,
+ "OnlineConfiguration parameter can be specified only if the backup method is volumeSnapshot",
+ ))
+ }
+
+ if r.Spec.Method == apiv1.BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "pluginConfiguration"),
+ r.Spec.PluginConfiguration,
+ "cannot be empty when the backup method is plugin",
+ ))
+ }
+
+ return result
+}
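
A quick smoke check of the rules above (a hypothetical snippet, assuming this package plus the context and k8s.io/utils/ptr imports; the relocated tests in the next file exercise the same logic through validate() directly):

    func exampleRejectOnlineOnBarman() error {
    	v := &BackupCustomValidator{}
    	backup := &apiv1.Backup{
    		Spec: apiv1.BackupSpec{
    			Method: apiv1.BackupMethodBarmanObjectStore,
    			Online: ptr.To(true),
    		},
    	}
    	// Returns an Invalid error on spec.online: the Online flag is only
    	// meaningful for the volumeSnapshot method.
    	_, err := v.ValidateCreate(context.Background(), backup)
    	return err
    }
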
diff --git a/api/v1/backup_webhook_test.go b/internal/webhook/v1/backup_webhook_test.go
similarity index 66%
rename from api/v1/backup_webhook_test.go
rename to internal/webhook/v1/backup_webhook_test.go
index d7de85abe3..2ac2fbf883 100644
--- a/api/v1/backup_webhook_test.go
+++ b/internal/webhook/v1/backup_webhook_test.go
@@ -19,6 +19,7 @@ package v1
import (
"k8s.io/utils/ptr"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
@@ -26,49 +27,54 @@ import (
)
var _ = Describe("Backup webhook validate", func() {
+ var v *BackupCustomValidator
+ BeforeEach(func() {
+ v = &BackupCustomValidator{}
+ })
+
It("doesn't complain if VolumeSnapshot CRD is present", func() {
- backup := &Backup{
- Spec: BackupSpec{
- Method: BackupMethodVolumeSnapshot,
+ backup := &apiv1.Backup{
+ Spec: apiv1.BackupSpec{
+ Method: apiv1.BackupMethodVolumeSnapshot,
},
}
utils.SetVolumeSnapshot(true)
- result := backup.validate()
+ result := v.validate(backup)
Expect(result).To(BeEmpty())
})
It("complains if VolumeSnapshot CRD is not present", func() {
- backup := &Backup{
- Spec: BackupSpec{
- Method: BackupMethodVolumeSnapshot,
+ backup := &apiv1.Backup{
+ Spec: apiv1.BackupSpec{
+ Method: apiv1.BackupMethodVolumeSnapshot,
},
}
utils.SetVolumeSnapshot(false)
- result := backup.validate()
+ result := v.validate(backup)
Expect(result).To(HaveLen(1))
Expect(result[0].Field).To(Equal("spec.method"))
})
It("complains if online is set on a barman backup", func() {
- backup := &Backup{
- Spec: BackupSpec{
- Method: BackupMethodBarmanObjectStore,
+ backup := &apiv1.Backup{
+ Spec: apiv1.BackupSpec{
+ Method: apiv1.BackupMethodBarmanObjectStore,
Online: ptr.To(true),
},
}
- result := backup.validate()
+ result := v.validate(backup)
Expect(result).To(HaveLen(1))
Expect(result[0].Field).To(Equal("spec.online"))
})
It("complains if onlineConfiguration is set on a barman backup", func() {
- backup := &Backup{
- Spec: BackupSpec{
- Method: BackupMethodBarmanObjectStore,
- OnlineConfiguration: &OnlineConfiguration{},
+ backup := &apiv1.Backup{
+ Spec: apiv1.BackupSpec{
+ Method: apiv1.BackupMethodBarmanObjectStore,
+ OnlineConfiguration: &apiv1.OnlineConfiguration{},
},
}
- result := backup.validate()
+ result := v.validate(backup)
Expect(result).To(HaveLen(1))
Expect(result[0].Field).To(Equal("spec.onlineConfiguration"))
})
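
Since the suite now lives under internal/webhook/v1, it runs with the usual Go test entry point (assuming the package ships the customary Ginkgo suite_test.go bootstrap, which this excerpt does not show):

    go test ./internal/webhook/v1/...
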
diff --git a/api/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go
similarity index 77%
rename from api/v1/cluster_webhook.go
rename to internal/webhook/v1/cluster_webhook.go
index eec7a01f68..fcba1b0621 100644
--- a/api/v1/cluster_webhook.go
+++ b/internal/webhook/v1/cluster_webhook.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1
import (
+ "context"
"encoding/json"
"fmt"
"slices"
@@ -30,7 +31,7 @@ import (
"github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/cloudnative-pg/machinery/pkg/types"
storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
@@ -43,397 +44,207 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
-const (
- // DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries
- DefaultMonitoringKey = "queries"
- // DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries,
- // if configured
- DefaultMonitoringConfigMapName = "cnpg-default-monitoring"
- // DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries,
- // if configured
- DefaultMonitoringSecretName = DefaultMonitoringConfigMapName
- // DefaultApplicationDatabaseName is the name of application database if not specified
- DefaultApplicationDatabaseName = "app"
- // DefaultApplicationUserName is the name of application database owner if not specified
- DefaultApplicationUserName = DefaultApplicationDatabaseName
-)
-
const sharedBuffersParameter = "shared_buffers"
// clusterLog is for logging in this package.
var clusterLog = log.WithName("cluster-resource").WithValues("version", "v1")
-// SetupWebhookWithManager setup the webhook inside the controller manager
-func (r *Cluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
- For(r).
+// SetupClusterWebhookWithManager registers the webhook for Cluster in the manager.
+func SetupClusterWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Cluster{}).
+ WithValidator(&ClusterCustomValidator{}).
+ WithDefaulter(&ClusterCustomDefaulter{}).
Complete()
}
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Modifying it to an invalid path can cause API server errors when it fails to locate the webhook.
// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-cluster,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,verbs=create;update,versions=v1,name=mcluster.cnpg.io,sideEffects=None
-var _ webhook.Defaulter = &Cluster{}
-
-// Default implements webhook.Defaulter so a webhook will be registered for the type
-func (r *Cluster) Default() {
- clusterLog.Info("default", "name", r.Name, "namespace", r.Namespace)
-
- r.setDefaults(true)
-}
-
-// SetDefaults apply the defaults to undefined values in a Cluster
-func (r *Cluster) SetDefaults() {
- r.setDefaults(false)
-}
-
-func (r *Cluster) setDefaults(preserveUserSettings bool) {
- // Defaulting the image name if not specified
- if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil {
- r.Spec.ImageName = configuration.Current.PostgresImageName
- }
+// ClusterCustomDefaulter struct is responsible for setting default values on the custom resource of the
+// Kind Cluster when those are created or updated.
+type ClusterCustomDefaulter struct{}
- // Defaulting the bootstrap method if not specified
- if r.Spec.Bootstrap == nil {
- r.Spec.Bootstrap = &BootstrapConfiguration{}
- }
+var _ webhook.CustomDefaulter = &ClusterCustomDefaulter{}
- // Defaulting initDB if no other bootstrap method was passed
- switch {
- case r.Spec.Bootstrap.Recovery != nil:
- r.defaultRecovery()
- case r.Spec.Bootstrap.PgBaseBackup != nil:
- r.defaultPgBaseBackup()
- default:
- r.defaultInitDB()
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Cluster.
+func (d *ClusterCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
+ cluster, ok := obj.(*apiv1.Cluster)
+ if !ok {
+ return fmt.Errorf("expected an Cluster object but got %T", obj)
}
+ clusterLog.Info("Defaulting for Cluster", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
- // Defaulting the pod anti-affinity type if podAntiAffinity
- if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) &&
- r.Spec.Affinity.PodAntiAffinityType == "" {
- r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred
- }
+ cluster.Default()
- if r.Spec.Backup != nil && r.Spec.Backup.Target == "" {
- r.Spec.Backup.Target = DefaultBackupTarget
- }
-
- psqlVersion, err := r.GetPostgresqlVersion()
- if err == nil {
- // The validation error will be already raised by the
- // validateImageName function
- info := postgres.ConfigurationInfo{
- Settings: postgres.CnpgConfigurationSettings,
- Version: psqlVersion,
- UserSettings: r.Spec.PostgresConfiguration.Parameters,
- IsReplicaCluster: r.IsReplica(),
- PreserveFixedSettingsFromUser: preserveUserSettings,
- IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta),
- IsAlterSystemEnabled: r.Spec.PostgresConfiguration.EnableAlterSystem,
- }
- sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters()
- r.Spec.PostgresConfiguration.Parameters = sanitizedParameters
- }
-
- if r.Spec.LogLevel == "" {
- r.Spec.LogLevel = log.InfoLevelString
- }
-
- // we inject the defaultMonitoringQueries if the MonitoringQueriesConfigmap parameter is not empty
- // and defaultQueries not disabled on cluster crd
- if !r.Spec.Monitoring.AreDefaultQueriesDisabled() {
- r.defaultMonitoringQueries(configuration.Current)
- }
+ return nil
+}
- // If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots
- if r.Spec.ReplicationSlots == nil {
- r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{}
- }
- if r.Spec.ReplicationSlots.HighAvailability == nil {
- r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{
- Enabled: ptr.To(true),
- SlotPrefix: "_cnpg_",
- }
- }
- if r.Spec.ReplicationSlots.SynchronizeReplicas == nil {
- r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{
- Enabled: ptr.To(true),
- }
- }
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Modifying it to an invalid path can cause API server errors when it fails to locate the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-cluster,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,versions=v1,name=vcluster.cnpg.io,sideEffects=None
- if len(r.Spec.Tablespaces) > 0 {
- r.defaultTablespaces()
- }
+// ClusterCustomValidator struct is responsible for validating the Cluster resource
+// when it is created, updated, or deleted.
+type ClusterCustomValidator struct{}
- r.setDefaultPlugins(configuration.Current)
-}
+var _ webhook.CustomValidator = &ClusterCustomValidator{}
-func (r *Cluster) setDefaultPlugins(config *configuration.Data) {
- // Add the list of pre-defined plugins
- foundPlugins := stringset.New()
- for _, plugin := range r.Spec.Plugins {
- foundPlugins.Put(plugin.Name)
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Cluster.
+func (v *ClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ cluster, ok := obj.(*apiv1.Cluster)
+ if !ok {
+ return nil, fmt.Errorf("expected a Cluster object but got %T", obj)
}
+ clusterLog.Info("Validation for Cluster upon creation", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
- for _, pluginName := range config.GetIncludePlugins() {
- if !foundPlugins.Has(pluginName) {
- r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{
- Name: pluginName,
- Enabled: ptr.To(true),
- })
- }
- }
-}
+ allErrs := v.validate(cluster)
+ allWarnings := v.getAdmissionWarnings(cluster)
-// defaultTablespaces adds the tablespace owner where the
-// user didn't specify it
-func (r *Cluster) defaultTablespaces() {
- defaultOwner := r.GetApplicationDatabaseOwner()
- if len(defaultOwner) == 0 {
- defaultOwner = "postgres"
+ if len(allErrs) == 0 {
+ return allWarnings, nil
}
- for name, tablespaceConfiguration := range r.Spec.Tablespaces {
- if len(tablespaceConfiguration.Owner.Name) == 0 {
- tablespaceConfiguration.Owner.Name = defaultOwner
- }
- r.Spec.Tablespaces[name] = tablespaceConfiguration
- }
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
+ cluster.Name, allErrs)
}
-// defaultMonitoringQueries adds the default monitoring queries configMap
-// if not already present in CustomQueriesConfigMap
-func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) {
- if r.Spec.Monitoring == nil {
- r.Spec.Monitoring = &MonitoringConfiguration{}
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Cluster.
+func (v *ClusterCustomValidator) ValidateUpdate(
+ _ context.Context,
+ oldObj, newObj runtime.Object,
+) (admission.Warnings, error) {
+ cluster, ok := newObj.(*apiv1.Cluster)
+ if !ok {
+ return nil, fmt.Errorf("expected a Cluster object for the newObj but got %T", newObj)
}
- if config.MonitoringQueriesConfigmap != "" {
- var defaultConfigMapQueriesAlreadyPresent bool
- // We check if the default queries are already inserted in the monitoring configuration
- for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap {
- if monitoringConfigMap.Name == DefaultMonitoringConfigMapName {
- defaultConfigMapQueriesAlreadyPresent = true
- break
- }
- }
-
- // If the default queries are already present there is no need to re-add them.
- // Please note that in this case that the default configMap could overwrite user existing queries
- // depending on the order. This is an accepted behavior because the user willingly defined the order of his array
- if !defaultConfigMapQueriesAlreadyPresent {
- r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{
- {
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
- Key: DefaultMonitoringKey,
- },
- }, r.Spec.Monitoring.CustomQueriesConfigMap...)
- }
+ oldCluster, ok := oldObj.(*apiv1.Cluster)
+ if !ok {
+ return nil, fmt.Errorf("expected a Cluster object for the oldObj but got %T", oldObj)
}
- if config.MonitoringQueriesSecret != "" {
- var defaultSecretQueriesAlreadyPresent bool
- // we check if the default queries are already inserted in the monitoring configuration
- for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret {
- if monitoringSecret.Name == DefaultMonitoringSecretName {
- defaultSecretQueriesAlreadyPresent = true
- break
- }
- }
+ clusterLog.Info("Validation for Cluster upon update", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
- if !defaultSecretQueriesAlreadyPresent {
- r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{
- {
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
- Key: DefaultMonitoringKey,
- },
- }, r.Spec.Monitoring.CustomQueriesSecret...)
- }
- }
-}
+ // applying defaults before validating updates to set any new default
+ oldCluster.SetDefaults()
-// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed
-func (r *Cluster) defaultInitDB() {
- if r.Spec.Bootstrap.InitDB == nil {
- r.Spec.Bootstrap.InitDB = &BootstrapInitDB{
- Database: DefaultApplicationDatabaseName,
- Owner: DefaultApplicationUserName,
- }
- }
+ allErrs := append(
+ v.validate(cluster),
+ v.validateClusterChanges(cluster, oldCluster)...,
+ )
- if r.Spec.Bootstrap.InitDB.Database == "" {
- r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName
- }
- if r.Spec.Bootstrap.InitDB.Owner == "" {
- r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database
- }
- if r.Spec.Bootstrap.InitDB.Encoding == "" {
- r.Spec.Bootstrap.InitDB.Encoding = "UTF8"
- }
- if r.Spec.Bootstrap.InitDB.LocaleCollate == "" {
- r.Spec.Bootstrap.InitDB.LocaleCollate = "C"
- }
- if r.Spec.Bootstrap.InitDB.LocaleCType == "" {
- r.Spec.Bootstrap.InitDB.LocaleCType = "C"
+ if len(allErrs) == 0 {
+ return v.getAdmissionWarnings(cluster), nil
}
-}
-// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed
-func (r *Cluster) defaultRecovery() {
- if r.Spec.Bootstrap.Recovery.Database == "" {
- r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName
- }
- if r.Spec.Bootstrap.Recovery.Owner == "" {
- r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database
- }
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"},
+ cluster.Name, allErrs)
}
-// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed
-func (r *Cluster) defaultPgBaseBackup() {
- if r.Spec.Bootstrap.PgBaseBackup.Database == "" {
- r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Cluster.
+func (v *ClusterCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ cluster, ok := obj.(*apiv1.Cluster)
+ if !ok {
+ return nil, fmt.Errorf("expected a Cluster object but got %T", obj)
}
- if r.Spec.Bootstrap.PgBaseBackup.Owner == "" {
- r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database
- }
-}
+ clusterLog.Info("Validation for Cluster upon deletion", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
-// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-cluster,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,versions=v1,name=vcluster.cnpg.io,sideEffects=None
-
-var _ webhook.Validator = &Cluster{}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *Cluster) ValidateCreate() (admission.Warnings, error) {
- clusterLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
- allErrs := r.Validate()
- allWarnings := r.getAdmissionWarnings()
-
- if len(allErrs) == 0 {
- return allWarnings, nil
- }
+ // TODO(user): fill in your validation logic upon object deletion.
- return nil, apierrors.NewInvalid(
- schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
- r.Name, allErrs)
+ return nil, nil
}
-// Validate groups the validation logic for clusters returning a list of all encountered errors
-func (r *Cluster) Validate() (allErrs field.ErrorList) {
- type validationFunc func() field.ErrorList
+// validate groups the validation logic for clusters, returning a list of all encountered errors
+func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.ErrorList) {
+ type validationFunc func(*apiv1.Cluster) field.ErrorList
validations := []validationFunc{
- r.validateInitDB,
- r.validateRecoveryApplicationDatabase,
- r.validatePgBaseBackupApplicationDatabase,
- r.validateImport,
- r.validateSuperuserSecret,
- r.validateCerts,
- r.validateBootstrapMethod,
- r.validateImageName,
- r.validateImagePullPolicy,
- r.validateRecoveryTarget,
- r.validatePrimaryUpdateStrategy,
- r.validateMinSyncReplicas,
- r.validateMaxSyncReplicas,
- r.validateStorageSize,
- r.validateWalStorageSize,
- r.validateEphemeralVolumeSource,
- r.validateTablespaceStorageSize,
- r.validateName,
- r.validateTablespaceNames,
- r.validateBootstrapPgBaseBackupSource,
- r.validateTablespaceBackupSnapshot,
- r.validateBootstrapRecoverySource,
- r.validateBootstrapRecoveryDataSource,
- r.validateExternalClusters,
- r.validateTolerations,
- r.validateAntiAffinity,
- r.validateReplicaMode,
- r.validateBackupConfiguration,
- r.validateRetentionPolicy,
- r.validateConfiguration,
- r.validateSynchronousReplicaConfiguration,
- r.validateLDAP,
- r.validateReplicationSlots,
- r.validateEnv,
- r.validateManagedServices,
- r.validateManagedRoles,
- r.validateManagedExtensions,
- r.validateResources,
- r.validateHibernationAnnotation,
- r.validatePromotionToken,
+ v.validateInitDB,
+ v.validateRecoveryApplicationDatabase,
+ v.validatePgBaseBackupApplicationDatabase,
+ v.validateImport,
+ v.validateSuperuserSecret,
+ v.validateCerts,
+ v.validateBootstrapMethod,
+ v.validateImageName,
+ v.validateImagePullPolicy,
+ v.validateRecoveryTarget,
+ v.validatePrimaryUpdateStrategy,
+ v.validateMinSyncReplicas,
+ v.validateMaxSyncReplicas,
+ v.validateStorageSize,
+ v.validateWalStorageSize,
+ v.validateEphemeralVolumeSource,
+ v.validateTablespaceStorageSize,
+ v.validateName,
+ v.validateTablespaceNames,
+ v.validateBootstrapPgBaseBackupSource,
+ v.validateTablespaceBackupSnapshot,
+ v.validateBootstrapRecoverySource,
+ v.validateBootstrapRecoveryDataSource,
+ v.validateExternalClusters,
+ v.validateTolerations,
+ v.validateAntiAffinity,
+ v.validateReplicaMode,
+ v.validateBackupConfiguration,
+ v.validateRetentionPolicy,
+ v.validateConfiguration,
+ v.validateSynchronousReplicaConfiguration,
+ v.validateLDAP,
+ v.validateReplicationSlots,
+ v.validateEnv,
+ v.validateManagedServices,
+ v.validateManagedRoles,
+ v.validateManagedExtensions,
+ v.validateResources,
+ v.validateHibernationAnnotation,
+ v.validatePromotionToken,
}
for _, validate := range validations {
- allErrs = append(allErrs, validate()...)
+ allErrs = append(allErrs, validate(r)...)
}
return allErrs
}
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (r *Cluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
- clusterLog.Info("validate update", "name", r.Name, "namespace", r.Namespace)
- oldCluster := old.(*Cluster)
-
- // applying defaults before validating updates to set any new default
- oldCluster.SetDefaults()
-
- allErrs := append(
- r.Validate(),
- r.ValidateChanges(oldCluster)...,
- )
-
- if len(allErrs) == 0 {
- return r.getAdmissionWarnings(), nil
- }
-
- return nil, apierrors.NewInvalid(
- schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"},
- r.Name, allErrs)
-}
-
-// ValidateChanges groups the validation logic for cluster changes checking the differences between
+// validateClusterChanges groups the validation logic for cluster changes checking the differences between
// the previous version and the new one of the cluster, returning a list of all encountered errors
-func (r *Cluster) ValidateChanges(old *Cluster) (allErrs field.ErrorList) {
+func (v *ClusterCustomValidator) validateClusterChanges(r, old *apiv1.Cluster) (allErrs field.ErrorList) {
if old == nil {
clusterLog.Info("Received invalid old object, skipping old object validation",
"old", old)
return nil
}
- type validationFunc func(old *Cluster) field.ErrorList
+ type validationFunc func(*apiv1.Cluster, *apiv1.Cluster) field.ErrorList
validations := []validationFunc{
- r.validateImageChange,
- r.validateConfigurationChange,
- r.validateStorageChange,
- r.validateWalStorageChange,
- r.validateTablespacesChange,
- r.validateUnixPermissionIdentifierChange,
- r.validateReplicationSlotsChange,
- r.validateWALLevelChange,
- r.validateReplicaClusterChange,
+ v.validateImageChange,
+ v.validateConfigurationChange,
+ v.validateStorageChange,
+ v.validateWalStorageChange,
+ v.validateTablespacesChange,
+ v.validateUnixPermissionIdentifierChange,
+ v.validateReplicationSlotsChange,
+ v.validateWALLevelChange,
+ v.validateReplicaClusterChange,
}
for _, validate := range validations {
- allErrs = append(allErrs, validate(old)...)
+ allErrs = append(allErrs, validate(r, old)...)
}
return allErrs
}
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (r *Cluster) ValidateDelete() (admission.Warnings, error) {
- clusterLog.Info("validate delete", "name", r.Name)
-
- // TODO(user): fill in your validation logic upon object deletion.
- return nil, nil
-}
-
// validateLDAP validates the ldap postgres configuration
-func (r *Cluster) validateLDAP() field.ErrorList {
+func (v *ClusterCustomValidator) validateLDAP(r *apiv1.Cluster) field.ErrorList {
// No validating if not specified
if r.Spec.PostgresConfiguration.LDAP == nil {
return nil
@@ -460,7 +271,7 @@ func (r *Cluster) validateLDAP() field.ErrorList {
}
// validateEnv validate the environment variables settings proposed by the user
-func (r *Cluster) validateEnv() field.ErrorList {
+func (v *ClusterCustomValidator) validateEnv(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
for i := range r.Spec.Env {
@@ -501,7 +312,7 @@ func isReservedEnvironmentVariable(name string) bool {
// validateInitDB validate the bootstrapping options when initdb
// method is used
-func (r *Cluster) validateInitDB() field.ErrorList {
+func (v *ClusterCustomValidator) validateInitDB(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// If it's not configured, everything is ok
@@ -516,7 +327,7 @@ func (r *Cluster) validateInitDB() field.ErrorList {
// If you specify the database name, then you need also to specify the
// owner user and vice-versa
initDBOptions := r.Spec.Bootstrap.InitDB
- result = r.validateApplicationDatabase(initDBOptions.Database, initDBOptions.Owner,
+ result = v.validateApplicationDatabase(initDBOptions.Database, initDBOptions.Owner,
"initdb")
if initDBOptions.WalSegmentSize != 0 && !utils.IsPowerOfTwo(initDBOptions.WalSegmentSize) {
@@ -555,7 +366,7 @@ func (r *Cluster) validateInitDB() field.ErrorList {
return result
}
-func (r *Cluster) validateImport() field.ErrorList {
+func (v *ClusterCustomValidator) validateImport(r *apiv1.Cluster) field.ErrorList {
// If it's not configured, everything is ok
if r.Spec.Bootstrap == nil {
return nil
@@ -571,10 +382,10 @@ func (r *Cluster) validateImport() field.ErrorList {
}
switch importSpec.Type {
- case MicroserviceSnapshotType:
- return importSpec.validateMicroservice()
- case MonolithSnapshotType:
- return importSpec.validateMonolith()
+ case apiv1.MicroserviceSnapshotType:
+ return v.validateMicroservice(importSpec)
+ case apiv1.MonolithSnapshotType:
+ return v.validateMonolith(importSpec)
default:
return field.ErrorList{
field.Invalid(
@@ -585,7 +396,7 @@ func (r *Cluster) validateImport() field.ErrorList {
}
}
-func (s Import) validateMicroservice() field.ErrorList {
+func (v *ClusterCustomValidator) validateMicroservice(s *apiv1.Import) field.ErrorList {
var result field.ErrorList
if len(s.Databases) != 1 {
@@ -621,7 +432,7 @@ func (s Import) validateMicroservice() field.ErrorList {
return result
}
-func (s Import) validateMonolith() field.ErrorList {
+func (v *ClusterCustomValidator) validateMonolith(s *apiv1.Import) field.ErrorList {
var result field.ErrorList
if len(s.Databases) < 1 {
@@ -669,7 +480,7 @@ func (s Import) validateMonolith() field.ErrorList {
// validateRecovery validate the bootstrapping options when Recovery
// method is used
-func (r *Cluster) validateRecoveryApplicationDatabase() field.ErrorList {
+func (v *ClusterCustomValidator) validateRecoveryApplicationDatabase(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// If it's not configured, everything is ok
@@ -682,13 +493,12 @@ func (r *Cluster) validateRecoveryApplicationDatabase() field.ErrorList {
}
recoveryOptions := r.Spec.Bootstrap.Recovery
- return r.validateApplicationDatabase(recoveryOptions.Database, recoveryOptions.Owner,
- "recovery")
+ return v.validateApplicationDatabase(recoveryOptions.Database, recoveryOptions.Owner, "recovery")
}
// validatePgBaseBackup validate the bootstrapping options when pg_basebackup
// method is used
-func (r *Cluster) validatePgBaseBackupApplicationDatabase() field.ErrorList {
+func (v *ClusterCustomValidator) validatePgBaseBackupApplicationDatabase(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// If it's not configured, everything is ok
@@ -701,19 +511,19 @@ func (r *Cluster) validatePgBaseBackupApplicationDatabase() field.ErrorList {
}
pgBaseBackupOptions := r.Spec.Bootstrap.PgBaseBackup
- return r.validateApplicationDatabase(pgBaseBackupOptions.Database, pgBaseBackupOptions.Owner,
+ return v.validateApplicationDatabase(pgBaseBackupOptions.Database, pgBaseBackupOptions.Owner,
"pg_basebackup")
}
// validateApplicationDatabase validates the configuration of the application database
-func (r *Cluster) validateApplicationDatabase(
+func (v *ClusterCustomValidator) validateApplicationDatabase(
database string,
owner string,
command string,
) field.ErrorList {
var result field.ErrorList
// If you specify the database name, then you need also to specify the
- // owner user and vice-versa
+ // owner user and vice versa
if database != "" && owner == "" {
result = append(
result,
@@ -734,7 +544,7 @@ func (r *Cluster) validateApplicationDatabase(
}
// validateCerts validates all the provided certificates
-func (r *Cluster) validateCerts() field.ErrorList {
+func (v *ClusterCustomValidator) validateCerts(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
certificates := r.Spec.Certificates
@@ -778,7 +588,7 @@ func (r *Cluster) validateCerts() field.ErrorList {
}
// validateSuperuserSecret validates the superuser secret value
-func (r *Cluster) validateSuperuserSecret() field.ErrorList {
+func (v *ClusterCustomValidator) validateSuperuserSecret(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// If empty, we're ok!
@@ -801,7 +611,7 @@ func (r *Cluster) validateSuperuserSecret() field.ErrorList {
// validateBootstrapMethod is used to ensure that only one
// bootstrap method is active
-func (r *Cluster) validateBootstrapMethod() field.ErrorList {
+func (v *ClusterCustomValidator) validateBootstrapMethod(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// If it's not configured, everything is ok
@@ -834,7 +644,7 @@ func (r *Cluster) validateBootstrapMethod() field.ErrorList {
// validateBootstrapPgBaseBackupSource is used to ensure that the source
// server is correctly defined
-func (r *Cluster) validateBootstrapPgBaseBackupSource() field.ErrorList {
+func (v *ClusterCustomValidator) validateBootstrapPgBaseBackupSource(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// This validation is only applicable for physical backup
@@ -858,7 +668,7 @@ func (r *Cluster) validateBootstrapPgBaseBackupSource() field.ErrorList {
// validateBootstrapRecoverySource is used to ensure that the source
// server is correctly defined
-func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList {
+func (v *ClusterCustomValidator) validateBootstrapRecoverySource(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// This validation is only applicable for recovery based bootstrap
@@ -895,7 +705,7 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList {
// validateBootstrapRecoveryDataSource is used to ensure that the data
// source is correctly defined
-func (r *Cluster) validateBootstrapRecoveryDataSource() field.ErrorList {
+func (v *ClusterCustomValidator) validateBootstrapRecoveryDataSource(r *apiv1.Cluster) field.ErrorList {
// This validation only applies to a recovery bootstrap based on a volume snapshot data source
if r.Spec.Bootstrap == nil || r.Spec.Bootstrap.Recovery == nil || r.Spec.Bootstrap.Recovery.VolumeSnapshots == nil {
return nil
@@ -951,7 +761,7 @@ func (r *Cluster) validateBootstrapRecoveryDataSource() field.ErrorList {
// validateVolumeSnapshotSource validates a source of a recovery snapshot.
// The supported resources are VolumeSnapshots and PersistentVolumeClaims
func validateVolumeSnapshotSource(
- value v1.TypedLocalObjectReference,
+ value corev1.TypedLocalObjectReference,
path *field.Path,
) field.ErrorList {
apiGroup := ""
@@ -961,7 +771,7 @@ func validateVolumeSnapshotSource(
switch {
case apiGroup == storagesnapshotv1.GroupName && value.Kind == "VolumeSnapshot":
- case apiGroup == v1.GroupName && value.Kind == "PersistentVolumeClaim":
+ case apiGroup == corev1.GroupName && value.Kind == "PersistentVolumeClaim":
default:
return field.ErrorList{
field.Invalid(path, value, "Only VolumeSnapshots and PersistentVolumeClaims are supported"),
@@ -973,7 +783,7 @@ func validateVolumeSnapshotSource(
// validateImageName validates the image name ensuring we aren't
// using the "latest" tag
-func (r *Cluster) validateImageName() field.ErrorList {
+func (v *ClusterCustomValidator) validateImageName(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.ImageName == "" {
@@ -1014,11 +824,11 @@ func (r *Cluster) validateImageName() field.ErrorList {
// validateImagePullPolicy validates the image pull policy,
// ensuring it is one of "Always", "Never" or "IfNotPresent" when defined
-func (r *Cluster) validateImagePullPolicy() field.ErrorList {
+func (v *ClusterCustomValidator) validateImagePullPolicy(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
switch r.Spec.ImagePullPolicy {
- case v1.PullAlways, v1.PullNever, v1.PullIfNotPresent, "":
+ case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent, "":
return result
default:
return append(
@@ -1027,11 +837,11 @@ func (r *Cluster) validateImagePullPolicy() field.ErrorList {
field.NewPath("spec", "imagePullPolicy"),
r.Spec.ImagePullPolicy,
fmt.Sprintf("invalid imagePullPolicy, if defined must be one of '%s', '%s' or '%s'",
- v1.PullAlways, v1.PullNever, v1.PullIfNotPresent)))
+ corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent)))
}
}
-func (r *Cluster) validateResources() field.ErrorList {
+func (v *ClusterCustomValidator) validateResources(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
cpuRequest := r.Spec.Resources.Requests.Cpu()
@@ -1089,7 +899,7 @@ func (r *Cluster) validateResources() field.ErrorList {
return result
}
-func (r *Cluster) validateSynchronousReplicaConfiguration() field.ErrorList {
+func (v *ClusterCustomValidator) validateSynchronousReplicaConfiguration(r *apiv1.Cluster) field.ErrorList {
if r.Spec.PostgresConfiguration.Synchronous == nil {
return nil
}
@@ -1112,7 +922,7 @@ func (r *Cluster) validateSynchronousReplicaConfiguration() field.ErrorList {
}
// validateConfiguration determines whether a PostgreSQL configuration is valid
-func (r *Cluster) validateConfiguration() field.ErrorList {
+func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
// We cannot have both old-style synchronous replica configuration
@@ -1249,7 +1059,7 @@ func (r *Cluster) validateConfiguration() field.ErrorList {
// validateWalSizeConfiguration verifies that min_wal_size < max_wal_size < wal volume size
func validateWalSizeConfiguration(
- postgresConfig PostgresConfiguration, walVolumeSize *resource.Quantity,
+ postgresConfig apiv1.PostgresConfiguration, walVolumeSize *resource.Quantity,
) field.ErrorList {
const (
minWalSizeKey = "min_wal_size"
@@ -1365,7 +1175,7 @@ func parsePostgresQuantityValue(value string) (resource.Quantity, error) {
// validateConfigurationChange determines whether a PostgreSQL configuration
// change can be applied
-func (r *Cluster) validateConfigurationChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateConfigurationChange(r, old *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if old.Spec.ImageName != r.Spec.ImageName {
@@ -1387,7 +1197,7 @@ func (r *Cluster) validateConfigurationChange(old *Cluster) field.ErrorList {
return result
}
-func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstraints) *field.Error {
+func validateSyncReplicaElectionConstraint(constraints apiv1.SyncReplicaElectionConstraints) *field.Error {
if !constraints.Enabled {
return nil
}
@@ -1406,7 +1216,7 @@ func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstr
// validateImageChange validates the change from a certain image name
// to a new one.
-func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
var newVersion, oldVersion version.Data
var err error
@@ -1451,7 +1261,7 @@ func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList {
// Validate the recovery target to ensure that the mutual exclusivity
// of options is respected, and validate the format of targetTime
// if specified
-func (r *Cluster) validateRecoveryTarget() field.ErrorList {
+func (v *ClusterCustomValidator) validateRecoveryTarget(r *apiv1.Cluster) field.ErrorList {
if r.Spec.Bootstrap == nil || r.Spec.Bootstrap.Recovery == nil {
return nil
}
@@ -1516,7 +1326,7 @@ func (r *Cluster) validateRecoveryTarget() field.ErrorList {
return result
}
-func validateTargetExclusiveness(recoveryTarget *RecoveryTarget) field.ErrorList {
+func validateTargetExclusiveness(recoveryTarget *apiv1.RecoveryTarget) field.ErrorList {
targets := 0
if recoveryTarget.TargetImmediate != nil {
targets++
@@ -1547,15 +1357,15 @@ func validateTargetExclusiveness(recoveryTarget *RecoveryTarget) field.ErrorList
// Validate the primary update strategy against the number of
// required instances
-func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList {
+func (v *ClusterCustomValidator) validatePrimaryUpdateStrategy(r *apiv1.Cluster) field.ErrorList {
if r.Spec.PrimaryUpdateStrategy == "" {
return nil
}
var result field.ErrorList
- if r.Spec.PrimaryUpdateStrategy != PrimaryUpdateStrategySupervised &&
- r.Spec.PrimaryUpdateStrategy != PrimaryUpdateStrategyUnsupervised {
+ if r.Spec.PrimaryUpdateStrategy != apiv1.PrimaryUpdateStrategySupervised &&
+ r.Spec.PrimaryUpdateStrategy != apiv1.PrimaryUpdateStrategyUnsupervised {
result = append(result, field.Invalid(
field.NewPath("spec", "primaryUpdateStrategy"),
r.Spec.PrimaryUpdateStrategy,
@@ -1563,7 +1373,7 @@ func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList {
return result
}
- if r.Spec.PrimaryUpdateStrategy == PrimaryUpdateStrategySupervised && r.Spec.Instances == 1 {
+ if r.Spec.PrimaryUpdateStrategy == apiv1.PrimaryUpdateStrategySupervised && r.Spec.Instances == 1 {
result = append(result, field.Invalid(
field.NewPath("spec", "primaryUpdateStrategy"),
r.Spec.PrimaryUpdateStrategy,
@@ -1576,7 +1386,7 @@ func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList {
// Validate the maximum number of synchronous instances
// that should be kept in sync with the primary server
-func (r *Cluster) validateMaxSyncReplicas() field.ErrorList {
+func (v *ClusterCustomValidator) validateMaxSyncReplicas(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.MaxSyncReplicas < 0 {
@@ -1597,7 +1407,7 @@ func (r *Cluster) validateMaxSyncReplicas() field.ErrorList {
}
// Validate the minimum number of synchronous instances
-func (r *Cluster) validateMinSyncReplicas() field.ErrorList {
+func (v *ClusterCustomValidator) validateMinSyncReplicas(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.MinSyncReplicas < 0 {
@@ -1617,11 +1427,11 @@ func (r *Cluster) validateMinSyncReplicas() field.ErrorList {
return result
}
-func (r *Cluster) validateStorageSize() field.ErrorList {
+func (v *ClusterCustomValidator) validateStorageSize(r *apiv1.Cluster) field.ErrorList {
return validateStorageConfigurationSize(*field.NewPath("spec", "storage"), r.Spec.StorageConfiguration)
}
-func (r *Cluster) validateWalStorageSize() field.ErrorList {
+func (v *ClusterCustomValidator) validateWalStorageSize(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.ShouldCreateWalArchiveVolume() {
@@ -1632,7 +1442,7 @@ func (r *Cluster) validateWalStorageSize() field.ErrorList {
return result
}
-func (r *Cluster) validateEphemeralVolumeSource() field.ErrorList {
+func (v *ClusterCustomValidator) validateEphemeralVolumeSource(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.EphemeralVolumeSource != nil && (r.Spec.EphemeralVolumesSizeLimit != nil &&
@@ -1647,7 +1457,7 @@ func (r *Cluster) validateEphemeralVolumeSource() field.ErrorList {
return result
}
-func (r *Cluster) validateTablespaceStorageSize() field.ErrorList {
+func (v *ClusterCustomValidator) validateTablespaceStorageSize(r *apiv1.Cluster) field.ErrorList {
if r.Spec.Tablespaces == nil {
return nil
}
@@ -1666,7 +1476,7 @@ func (r *Cluster) validateTablespaceStorageSize() field.ErrorList {
func validateStorageConfigurationSize(
structPath field.Path,
- storageConfiguration StorageConfiguration,
+ storageConfiguration apiv1.StorageConfiguration,
) field.ErrorList {
var result field.ErrorList
@@ -1692,7 +1502,7 @@ func validateStorageConfigurationSize(
}
// Validate a change in the storage
-func (r *Cluster) validateStorageChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateStorageChange(r, old *apiv1.Cluster) field.ErrorList {
return validateStorageConfigurationChange(
field.NewPath("spec", "storage"),
old.Spec.StorageConfiguration,
@@ -1700,7 +1510,7 @@ func (r *Cluster) validateStorageChange(old *Cluster) field.ErrorList {
)
}
-func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateWalStorageChange(r, old *apiv1.Cluster) field.ErrorList {
if old.Spec.WalStorage == nil {
return nil
}
@@ -1723,7 +1533,7 @@ func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList {
// validateTablespacesChange checks that no tablespaces have been deleted, and that
// no tablespaces have an invalid storage update
-func (r *Cluster) validateTablespacesChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateTablespacesChange(r, old *apiv1.Cluster) field.ErrorList {
if old.Spec.Tablespaces == nil {
return nil
}
@@ -1760,8 +1570,8 @@ func (r *Cluster) validateTablespacesChange(old *Cluster) field.ErrorList {
// validateStorageConfigurationChange generates an error list by comparing two StorageConfiguration
func validateStorageConfigurationChange(
structPath *field.Path,
- oldStorage StorageConfiguration,
- newStorage StorageConfiguration,
+ oldStorage apiv1.StorageConfiguration,
+ newStorage apiv1.StorageConfiguration,
) field.ErrorList {
oldSize := oldStorage.GetSizeOrNil()
if oldSize == nil {
@@ -1791,7 +1601,7 @@ func validateStorageConfigurationChange(
// Validate the cluster name. This is important to avoid issues
// while generating services, which don't support having dots in
// their name
-func (r *Cluster) validateName() field.ErrorList {
+func (v *ClusterCustomValidator) validateName(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if errs := validationutil.IsDNS1035Label(r.Name); len(errs) > 0 {
@@ -1811,7 +1621,7 @@ func (r *Cluster) validateName() field.ErrorList {
return result
}
-func (r *Cluster) validateTablespaceNames() field.ErrorList {
+func (v *ClusterCustomValidator) validateTablespaceNames(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.Tablespaces == nil {
return nil
@@ -1842,7 +1652,7 @@ func (r *Cluster) validateTablespaceNames() field.ErrorList {
return result
}
-func (r *Cluster) validateTablespaceBackupSnapshot() field.ErrorList {
+func (v *ClusterCustomValidator) validateTablespaceBackupSnapshot(r *apiv1.Cluster) field.ErrorList {
if r.Spec.Backup == nil || r.Spec.Backup.VolumeSnapshot == nil ||
len(r.Spec.Backup.VolumeSnapshot.TablespaceClassName) == 0 {
return nil
@@ -1864,7 +1674,7 @@ func (r *Cluster) validateTablespaceBackupSnapshot() field.ErrorList {
}
// Check if the external clusters list contains two servers with the same name
-func (r *Cluster) validateExternalClusters() field.ErrorList {
+func (v *ClusterCustomValidator) validateExternalClusters(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
stringSet := stringset.New()
@@ -1873,7 +1683,7 @@ func (r *Cluster) validateExternalClusters() field.ErrorList {
stringSet.Put(externalCluster.Name)
result = append(
result,
- r.validateExternalCluster(&r.Spec.ExternalClusters[idx], path)...)
+ v.validateExternalCluster(&r.Spec.ExternalClusters[idx], path)...)
}
if stringSet.Len() != len(r.Spec.ExternalClusters) {
@@ -1887,7 +1697,10 @@ func (r *Cluster) validateExternalClusters() field.ErrorList {
}
// validateExternalCluster checks the validity of a certain ExternalCluster
-func (r *Cluster) validateExternalCluster(externalCluster *ExternalCluster, path *field.Path) field.ErrorList {
+func (v *ClusterCustomValidator) validateExternalCluster(
+ externalCluster *apiv1.ExternalCluster,
+ path *field.Path,
+) field.ErrorList {
var result field.ErrorList
if externalCluster.ConnectionParameters == nil &&
@@ -1903,7 +1716,7 @@ func (r *Cluster) validateExternalCluster(externalCluster *ExternalCluster, path
return result
}
-func (r *Cluster) validateReplicaClusterChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateReplicaClusterChange(r, old *apiv1.Cluster) field.ErrorList {
// If the replication role didn't change then everything
// is fine
if r.IsReplica() == old.IsReplica() {
@@ -1924,7 +1737,7 @@ func (r *Cluster) validateReplicaClusterChange(old *Cluster) field.ErrorList {
return nil
}
-func (r *Cluster) validateUnixPermissionIdentifierChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateUnixPermissionIdentifierChange(r, old *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.PostgresGID != old.Spec.PostgresGID {
@@ -1944,7 +1757,7 @@ func (r *Cluster) validateUnixPermissionIdentifierChange(old *Cluster) field.Err
return result
}
-func (r *Cluster) validatePromotionToken() field.ErrorList {
+func (v *ClusterCustomValidator) validatePromotionToken(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.ReplicaCluster == nil {
@@ -2000,7 +1813,7 @@ func (r *Cluster) validatePromotionToken() field.ErrorList {
// Check if the replica mode is used with an incompatible bootstrap
// method
-func (r *Cluster) validateReplicaMode() field.ErrorList {
+func (v *ClusterCustomValidator) validateReplicaMode(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
replicaClusterConf := r.Spec.ReplicaCluster
@@ -2037,12 +1850,12 @@ func (r *Cluster) validateReplicaMode() field.ErrorList {
}
}
- result = append(result, r.validateReplicaClusterExternalClusters()...)
+ result = append(result, v.validateReplicaClusterExternalClusters(r)...)
return result
}
-func (r *Cluster) validateReplicaClusterExternalClusters() field.ErrorList {
+func (v *ClusterCustomValidator) validateReplicaClusterExternalClusters(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
replicaClusterConf := r.Spec.ReplicaCluster
if replicaClusterConf == nil {
@@ -2089,7 +1902,7 @@ func (r *Cluster) validateReplicaClusterExternalClusters() field.ErrorList {
// validateTolerations checks and validates the tolerations field
// This code is almost a verbatim copy of
// https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3147
-func (r *Cluster) validateTolerations() field.ErrorList {
+func (v *ClusterCustomValidator) validateTolerations(r *apiv1.Cluster) field.ErrorList {
path := field.NewPath("spec", "affinity", "toleration")
allErrors := field.ErrorList{}
for i, toleration := range r.Spec.Affinity.Tolerations {
@@ -2100,14 +1913,14 @@ func (r *Cluster) validateTolerations() field.ErrorList {
}
// empty toleration key with Exists operator and empty value means match all taints
- if len(toleration.Key) == 0 && toleration.Operator != v1.TolerationOpExists {
+ if len(toleration.Key) == 0 && toleration.Operator != corev1.TolerationOpExists {
allErrors = append(allErrors,
field.Invalid(idxPath.Child("operator"),
toleration.Operator,
"operator must be Exists when `key` is empty, which means \"match all values and all keys\""))
}
- if toleration.TolerationSeconds != nil && toleration.Effect != v1.TaintEffectNoExecute {
+ if toleration.TolerationSeconds != nil && toleration.Effect != corev1.TaintEffectNoExecute {
allErrors = append(allErrors,
field.Invalid(idxPath.Child("effect"),
toleration.Effect,
@@ -2117,20 +1930,20 @@ func (r *Cluster) validateTolerations() field.ErrorList {
// validate toleration operator and value
switch toleration.Operator {
// empty operator means Equal
- case v1.TolerationOpEqual, "":
+ case corev1.TolerationOpEqual, "":
if errs := validationutil.IsValidLabelValue(toleration.Value); len(errs) != 0 {
allErrors = append(allErrors,
field.Invalid(idxPath.Child("operator"),
toleration.Value, strings.Join(errs, ";")))
}
- case v1.TolerationOpExists:
+ case corev1.TolerationOpExists:
if len(toleration.Value) > 0 {
allErrors = append(allErrors,
field.Invalid(idxPath.Child("operator"),
toleration, "value must be empty when `operator` is 'Exists'"))
}
default:
- validValues := []string{string(v1.TolerationOpEqual), string(v1.TolerationOpExists)}
+ validValues := []string{string(corev1.TolerationOpEqual), string(corev1.TolerationOpExists)}
allErrors = append(allErrors,
field.NotSupported(idxPath.Child("operator"),
toleration.Operator, validValues))
@@ -2147,7 +1960,7 @@ func (r *Cluster) validateTolerations() field.ErrorList {
// validateTaintEffect is used by validateTolerations and is a verbatim copy of the code
// at https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3087
-func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
+func validateTaintEffect(effect *corev1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
if !allowEmpty && len(*effect) == 0 {
return field.ErrorList{field.Required(fldPath, "")}
}
@@ -2155,14 +1968,14 @@ func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field
allErrors := field.ErrorList{}
switch *effect {
// TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit.
- case v1.TaintEffectNoSchedule, v1.TaintEffectPreferNoSchedule, v1.TaintEffectNoExecute:
+ case corev1.TaintEffectNoSchedule, corev1.TaintEffectPreferNoSchedule, corev1.TaintEffectNoExecute:
// case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit,
// core.TaintEffectNoExecute:
default:
validValues := []string{
- string(v1.TaintEffectNoSchedule),
- string(v1.TaintEffectPreferNoSchedule),
- string(v1.TaintEffectNoExecute),
+ string(corev1.TaintEffectNoSchedule),
+ string(corev1.TaintEffectPreferNoSchedule),
+ string(corev1.TaintEffectNoExecute),
// TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit.
// string(core.TaintEffectNoScheduleNoAdmit),
}
@@ -2172,25 +1985,25 @@ func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field
}
// validateAntiAffinity checks and validates the anti-affinity fields.
-func (r *Cluster) validateAntiAffinity() field.ErrorList {
+func (v *ClusterCustomValidator) validateAntiAffinity(r *apiv1.Cluster) field.ErrorList {
path := field.NewPath("spec", "affinity", "podAntiAffinityType")
allErrors := field.ErrorList{}
- if r.Spec.Affinity.PodAntiAffinityType != PodAntiAffinityTypePreferred &&
- r.Spec.Affinity.PodAntiAffinityType != PodAntiAffinityTypeRequired &&
+ if r.Spec.Affinity.PodAntiAffinityType != apiv1.PodAntiAffinityTypePreferred &&
+ r.Spec.Affinity.PodAntiAffinityType != apiv1.PodAntiAffinityTypeRequired &&
r.Spec.Affinity.PodAntiAffinityType != "" {
allErrors = append(allErrors, field.Invalid(
path,
r.Spec.Affinity.PodAntiAffinityType,
fmt.Sprintf("pod anti-affinity type must be '%s' (default if empty) or '%s'",
- PodAntiAffinityTypePreferred, PodAntiAffinityTypeRequired),
+ apiv1.PodAntiAffinityTypePreferred, apiv1.PodAntiAffinityTypeRequired),
))
}
return allErrors
}
// validateBackupConfiguration validates the backup configuration
-func (r *Cluster) validateBackupConfiguration() field.ErrorList {
+func (v *ClusterCustomValidator) validateBackupConfiguration(r *apiv1.Cluster) field.ErrorList {
if r.Spec.Backup == nil {
return nil
}
@@ -2201,7 +2014,7 @@ func (r *Cluster) validateBackupConfiguration() field.ErrorList {
}
// validateRetentionPolicy validates the retention policy configuration
-func (r *Cluster) validateRetentionPolicy() field.ErrorList {
+func (v *ClusterCustomValidator) validateRetentionPolicy(r *apiv1.Cluster) field.ErrorList {
if r.Spec.Backup == nil {
return nil
}
@@ -2211,13 +2024,13 @@ func (r *Cluster) validateRetentionPolicy() field.ErrorList {
)
}
-func (r *Cluster) validateReplicationSlots() field.ErrorList {
+func (v *ClusterCustomValidator) validateReplicationSlots(r *apiv1.Cluster) field.ErrorList {
if r.Spec.ReplicationSlots == nil {
- r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ r.Spec.ReplicationSlots = &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
},
- SynchronizeReplicas: &SynchronizeReplicasConfiguration{
+ SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{
Enabled: ptr.To(true),
},
}
@@ -2228,7 +2041,7 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList {
return nil
}
- if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.compileRegex(); len(errs) > 0 {
+ if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.ValidateRegex(); len(errs) > 0 {
return field.ErrorList{
field.Invalid(
field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "excludePatterns"),
@@ -2240,7 +2053,7 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList {
return nil
}
-func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateReplicationSlotsChange(r, old *apiv1.Cluster) field.ErrorList {
newReplicationSlots := r.Spec.ReplicationSlots
oldReplicationSlots := old.Spec.ReplicationSlots
@@ -2276,7 +2089,7 @@ func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList {
return errs
}
-func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList {
+func (v *ClusterCustomValidator) validateWALLevelChange(r, old *apiv1.Cluster) field.ErrorList {
var errs field.ErrorList
newWALLevel := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLevel]
@@ -2293,7 +2106,7 @@ func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList {
return errs
}
-func (r *Cluster) validateManagedServices() field.ErrorList {
+func (v *ClusterCustomValidator) validateManagedServices(r *apiv1.Cluster) field.ErrorList {
reservedNames := []string{
r.GetServiceReadWriteName(),
r.GetServiceReadOnlyName(),
@@ -2319,10 +2132,10 @@ func (r *Cluster) validateManagedServices() field.ErrorList {
basePath := field.NewPath("spec", "managed", "services")
var errs field.ErrorList
- if slices.Contains(managedServices.DisabledDefaultServices, ServiceSelectorTypeRW) {
+ if slices.Contains(managedServices.DisabledDefaultServices, apiv1.ServiceSelectorTypeRW) {
errs = append(errs, field.Invalid(
basePath.Child("disabledDefaultServices"),
- ServiceSelectorTypeRW,
+ apiv1.ServiceSelectorTypeRW,
"service of type RW cannot be disabled.",
))
}
@@ -2366,7 +2179,7 @@ func (r *Cluster) validateManagedServices() field.ErrorList {
func validateServiceTemplate(
path *field.Path,
nameRequired bool,
- template ServiceTemplateSpec,
+ template apiv1.ServiceTemplateSpec,
) field.ErrorList {
var errs field.ErrorList
@@ -2386,7 +2199,7 @@ func validateServiceTemplate(
}
// validateManagedRoles validates the managed roles settings proposed by the user
-func (r *Cluster) validateManagedRoles() field.ErrorList {
+func (v *ClusterCustomValidator) validateManagedRoles(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
if r.Spec.Managed == nil {
@@ -2435,14 +2248,14 @@ func (r *Cluster) validateManagedRoles() field.ErrorList {
}
// validateManagedExtensions validates the managed extensions parameters set by the user
-func (r *Cluster) validateManagedExtensions() field.ErrorList {
+func (v *ClusterCustomValidator) validateManagedExtensions(r *apiv1.Cluster) field.ErrorList {
allErrors := field.ErrorList{}
- allErrors = append(allErrors, r.validatePgFailoverSlots()...)
+ allErrors = append(allErrors, v.validatePgFailoverSlots(r)...)
return allErrors
}
-func (r *Cluster) validatePgFailoverSlots() field.ErrorList {
+func (v *ClusterCustomValidator) validatePgFailoverSlots(r *apiv1.Cluster) field.ErrorList {
var result field.ErrorList
var pgFailoverSlots postgres.ManagedExtension
@@ -2502,11 +2315,11 @@ func (r *Cluster) validatePgFailoverSlots() field.ErrorList {
return result
}
-func (r *Cluster) getAdmissionWarnings() admission.Warnings {
- return r.getMaintenanceWindowsAdmissionWarnings()
+func (v *ClusterCustomValidator) getAdmissionWarnings(r *apiv1.Cluster) admission.Warnings {
+ return getMaintenanceWindowsAdmissionWarnings(r)
}
-func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings {
+func getMaintenanceWindowsAdmissionWarnings(r *apiv1.Cluster) admission.Warnings {
var result admission.Warnings
if r.Spec.NodeMaintenanceWindow != nil {
@@ -2518,7 +2331,7 @@ func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings {
}
// validateHibernationAnnotation checks that the hibernation annotation, if present, has a valid value
-func (r *Cluster) validateHibernationAnnotation() field.ErrorList {
+func (v *ClusterCustomValidator) validateHibernationAnnotation(r *apiv1.Cluster) field.ErrorList {
value, ok := r.Annotations[utils.HibernationAnnotationName]
isKnownValue := value == string(utils.HibernationAnnotationValueOn) ||
value == string(utils.HibernationAnnotationValueOff)
diff --git a/api/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go
similarity index 55%
rename from api/v1/cluster_webhook_test.go
rename to internal/webhook/v1/cluster_webhook_test.go
index bd08a26511..faa1f0f0d7 100644
--- a/api/v1/cluster_webhook_test.go
+++ b/internal/webhook/v1/cluster_webhook_test.go
@@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/ptr"
- "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
@@ -42,136 +42,154 @@ import (
)
var _ = Describe("bootstrap methods validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if there isn't a configuration", func() {
- emptyCluster := &Cluster{}
- result := emptyCluster.validateBootstrapMethod()
+ emptyCluster := &apiv1.Cluster{}
+ result := v.validateBootstrapMethod(emptyCluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we are using initdb", func() {
- initdbCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ initdbCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
},
}
- result := initdbCluster.validateBootstrapMethod()
+ result := v.validateBootstrapMethod(initdbCluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we are using recovery", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{},
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{},
},
},
}
- result := recoveryCluster.validateBootstrapMethod()
+ result := v.validateBootstrapMethod(recoveryCluster)
Expect(result).To(BeEmpty())
})
It("complains where there are two active bootstrap methods", func() {
- invalidCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{},
- InitDB: &BootstrapInitDB{},
+ invalidCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{},
+ InitDB: &apiv1.BootstrapInitDB{},
},
},
}
- result := invalidCluster.validateBootstrapMethod()
+ result := v.validateBootstrapMethod(invalidCluster)
Expect(result).To(HaveLen(1))
})
})
var _ = Describe("certificates options validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if there isn't a configuration", func() {
- emptyCluster := &Cluster{}
- result := emptyCluster.validateCerts()
+ emptyCluster := &apiv1.Cluster{}
+ result := v.validateCerts(emptyCluster)
Expect(result).To(BeEmpty())
})
+
It("doesn't complain if you specify some valid secret names", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Certificates: &CertificatesConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Certificates: &apiv1.CertificatesConfiguration{
ServerCASecret: "test-server-ca",
ServerTLSSecret: "test-server-tls",
},
},
}
- result := cluster.validateCerts()
+ result := v.validateCerts(cluster)
Expect(result).To(BeEmpty())
})
+
It("does complain if you specify the TLS secret and not the CA", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Certificates: &CertificatesConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Certificates: &apiv1.CertificatesConfiguration{
ServerTLSSecret: "test-server-tls",
},
},
}
- result := cluster.validateCerts()
+ result := v.validateCerts(cluster)
Expect(result).To(HaveLen(1))
})
+
It("does complain if you specify the TLS secret and AltDNSNames is not empty", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Certificates: &CertificatesConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Certificates: &apiv1.CertificatesConfiguration{
ServerCASecret: "test-server-ca",
ServerTLSSecret: "test-server-tls",
ServerAltDNSNames: []string{"dns-name"},
},
},
}
- result := cluster.validateCerts()
+ result := v.validateCerts(cluster)
Expect(result).To(HaveLen(1))
})
})
var _ = Describe("initdb options validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if there isn't a configuration", func() {
- emptyCluster := &Cluster{}
- result := emptyCluster.validateInitDB()
+ emptyCluster := &apiv1.Cluster{}
+ result := v.validateInitDB(emptyCluster)
Expect(result).To(BeEmpty())
})
It("complains if you specify the database name but not the owner", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
},
},
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(HaveLen(1))
})
It("complains if you specify the owner but not the database name", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Owner: "app",
},
},
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(HaveLen(1))
})
It("doesn't complain if you specify both database name and owner user", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
},
@@ -179,21 +197,21 @@ var _ = Describe("initdb options validation", func() {
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(BeEmpty())
})
It("complain if key is missing in the secretRefs", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- PostInitApplicationSQLRefs: &SQLRefs{
- SecretRefs: []SecretKeySelector{
+ PostInitApplicationSQLRefs: &apiv1.SQLRefs{
+ SecretRefs: []apiv1.SecretKeySelector{
{
- LocalObjectReference: LocalObjectReference{Name: "secret1"},
+ LocalObjectReference: apiv1.LocalObjectReference{Name: "secret1"},
},
},
},
@@ -202,19 +220,19 @@ var _ = Describe("initdb options validation", func() {
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(HaveLen(1))
})
It("complain if name is missing in the secretRefs", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- PostInitApplicationSQLRefs: &SQLRefs{
- SecretRefs: []SecretKeySelector{
+ PostInitApplicationSQLRefs: &apiv1.SQLRefs{
+ SecretRefs: []apiv1.SecretKeySelector{
{
Key: "key",
},
@@ -225,21 +243,21 @@ var _ = Describe("initdb options validation", func() {
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(HaveLen(1))
})
It("complain if key is missing in the configMapRefs", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- PostInitApplicationSQLRefs: &SQLRefs{
- ConfigMapRefs: []ConfigMapKeySelector{
+ PostInitApplicationSQLRefs: &apiv1.SQLRefs{
+ ConfigMapRefs: []apiv1.ConfigMapKeySelector{
{
- LocalObjectReference: LocalObjectReference{Name: "configmap1"},
+ LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap1"},
},
},
},
@@ -248,19 +266,19 @@ var _ = Describe("initdb options validation", func() {
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(HaveLen(1))
})
It("complain if name is missing in the configMapRefs", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- PostInitApplicationSQLRefs: &SQLRefs{
- ConfigMapRefs: []ConfigMapKeySelector{
+ PostInitApplicationSQLRefs: &apiv1.SQLRefs{
+ ConfigMapRefs: []apiv1.ConfigMapKeySelector{
{
Key: "key",
},
@@ -271,35 +289,35 @@ var _ = Describe("initdb options validation", func() {
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(HaveLen(1))
})
It("doesn't complain if configmapRefs and secretRefs are valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- PostInitApplicationSQLRefs: &SQLRefs{
- ConfigMapRefs: []ConfigMapKeySelector{
+ PostInitApplicationSQLRefs: &apiv1.SQLRefs{
+ ConfigMapRefs: []apiv1.ConfigMapKeySelector{
{
- LocalObjectReference: LocalObjectReference{Name: "configmap1"},
+ LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap1"},
Key: "key",
},
{
- LocalObjectReference: LocalObjectReference{Name: "configmap2"},
+ LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap2"},
Key: "key",
},
},
- SecretRefs: []SecretKeySelector{
+ SecretRefs: []apiv1.SecretKeySelector{
{
- LocalObjectReference: LocalObjectReference{Name: "secret1"},
+ LocalObjectReference: apiv1.LocalObjectReference{Name: "secret1"},
Key: "key",
},
{
- LocalObjectReference: LocalObjectReference{Name: "secret2"},
+ LocalObjectReference: apiv1.LocalObjectReference{Name: "secret2"},
Key: "key",
},
},
@@ -309,303 +327,157 @@ var _ = Describe("initdb options validation", func() {
},
}
- result := cluster.validateInitDB()
+ result := v.validateInitDB(cluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if superuser secret it's empty", func() {
- cluster := Cluster{
- Spec: ClusterSpec{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- result := cluster.validateSuperuserSecret()
+ result := v.validateSuperuserSecret(cluster)
Expect(result).To(BeEmpty())
})
It("complains if superuser secret name it's empty", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- SuperuserSecret: &LocalObjectReference{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ SuperuserSecret: &apiv1.LocalObjectReference{
Name: "",
},
},
}
- result := cluster.validateSuperuserSecret()
+ result := v.validateSuperuserSecret(cluster)
Expect(result).To(HaveLen(1))
})
})
-var _ = Describe("cluster configuration", func() {
- It("defaults to creating an application database", func() {
- cluster := Cluster{}
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app"))
- Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app"))
- })
-
- It("defaults the owner user with the database name", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
- Database: "appdb",
- },
- },
- },
- }
-
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb"))
- })
-
- It("defaults to create an application database if recovery is used", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{},
- },
- },
- }
- cluster.Default()
- Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue())
- Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty())
- Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty())
- Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil())
- })
-
- It("defaults the owner user with the database name for recovery", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- Database: "appdb",
- },
- },
- },
- }
-
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb"))
- })
-
- It("defaults to create an application database if pg_basebackup is used", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
- },
- },
- }
- cluster.Default()
- Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue())
- Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty())
- Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty())
- Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil())
- })
-
- It("defaults the owner user with the database name for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
- Database: "appdb",
- },
- },
- },
- }
-
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb"))
- })
-
- It("defaults the PostgreSQL configuration with parameters from the operator", func() {
- cluster := Cluster{}
- cluster.Default()
- Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty())
- })
-
- It("defaults the anti-affinity", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{},
- },
- }
- cluster.Default()
- Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred))
- Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil())
+var _ = Describe("ImagePullPolicy validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
})
-})
-var _ = Describe("ImagePullPolicy validation", func() {
It("complains if the imagePullPolicy isn't valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImagePullPolicy: "wrong",
},
}
- result := cluster.validateImagePullPolicy()
+ result := v.validateImagePullPolicy(cluster)
Expect(result).To(HaveLen(1))
})
+
It("does not complain if the imagePullPolicy is valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImagePullPolicy: "Always",
},
}
- result := cluster.validateImagePullPolicy()
+ result := v.validateImagePullPolicy(cluster)
Expect(result).To(BeEmpty())
})
})
-var _ = Describe("Defaulting webhook", func() {
- It("should fill the image name if isn't already set", func() {
- cluster := Cluster{}
- cluster.Default()
- Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName))
- })
-
- It("shouldn't set the image name if already present", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ImageName: "test:13",
- },
- }
- cluster.Default()
- Expect(cluster.Spec.ImageName).To(Equal("test:13"))
- })
-
- It("should setup the application database name", func() {
- cluster := Cluster{}
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app"))
- Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app"))
- })
-
- It("should set the owner name as the database name", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
- Database: "test",
- },
- },
- },
- }
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test"))
- Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test"))
- })
-
- It("should not overwrite application database and owner settings", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
- Database: "testdb",
- Owner: "testuser",
- },
- },
- },
- }
- cluster.Default()
- Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb"))
- Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser"))
+var _ = Describe("Image name validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
})
-})
-var _ = Describe("Image name validation", func() {
It("doesn't complain if the user simply accept the default", func() {
- var cluster Cluster
- Expect(cluster.validateImageName()).To(BeEmpty())
+ var cluster apiv1.Cluster
+ Expect(v.validateImageName(&cluster)).To(BeEmpty())
// Let's apply the defaulting webhook, too
cluster.Default()
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(&cluster)).To(BeEmpty())
})
It("complains when the 'latest' tag is detected", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:latest",
},
}
- Expect(cluster.validateImageName()).To(HaveLen(1))
+ Expect(v.validateImageName(cluster)).To(HaveLen(1))
})
It("doesn't complain when a alpha tag is used", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:15alpha1",
},
}
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(cluster)).To(BeEmpty())
})
It("doesn't complain when a beta tag is used", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:15beta1",
},
}
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(cluster)).To(BeEmpty())
})
It("doesn't complain when a release candidate tag is used", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:15rc1",
},
}
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(cluster)).To(BeEmpty())
})
It("complains when only the sha is passed", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866",
},
}
- Expect(cluster.validateImageName()).To(HaveLen(1))
+ Expect(v.validateImageName(cluster)).To(HaveLen(1))
})
It("doesn't complain if the tag is valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.4",
},
}
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(cluster)).To(BeEmpty())
})
It("doesn't complain if the tag is valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:14.4-1",
},
}
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(cluster)).To(BeEmpty())
})
It("doesn't complain if the tag is valid and has sha", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.4@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866",
},
}
- Expect(cluster.validateImageName()).To(BeEmpty())
+ Expect(v.validateImageName(cluster)).To(BeEmpty())
})
It("complain when the tag name is not a PostgreSQL version", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:test_12",
},
}
- Expect(cluster.validateImageName()).To(HaveLen(1))
+ Expect(v.validateImageName(cluster)).To(HaveLen(1))
})
})
@@ -634,329 +506,334 @@ var _ = DescribeTable("parsePostgresQuantityValue",
)
var _ = Describe("configuration change validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain when the configuration is exactly the same", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.4",
},
}
- clusterNew := clusterOld
- Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(BeEmpty())
+ clusterNew := clusterOld.DeepCopy()
+ Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty())
})
It("doesn't complain when we change a setting which is not fixed", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.4",
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.4",
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"shared_buffers": "4G",
},
},
},
}
- Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty())
})
It("complains when changing postgres major version and settings", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.4",
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:10.5",
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"shared_buffers": "4G",
},
},
},
}
- Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(HaveLen(1))
})
It("produces no error when WAL size settings are correct", func() {
- clusterNew := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "80MB",
"max_wal_size": "1024",
},
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "1500",
"max_wal_size": "2 GB",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "3Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "1.5GB",
"max_wal_size": "2000",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"max_wal_size": "1GB",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "100MB",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{},
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
})
It("produces one complaint when min_wal_size is bigger than max_wal_size", func() {
- clusterNew := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "1500",
"max_wal_size": "1GB",
},
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "2Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "2G",
"max_wal_size": "1GB",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "4Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
})
It("produces one complaint when max_wal_size is bigger than WAL storage", func() {
- clusterNew := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"max_wal_size": "2GB",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "1G",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "4Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "80MB",
"max_wal_size": "1500",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "1G",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "4Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
})
It("produces two complaints when min_wal_size is bigger than WAL storage and max_wal_size", func() {
- clusterNew := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "3GB",
"max_wal_size": "1GB",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(2))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(2))
})
It("complains about invalid value for min_wal_size and max_wal_size", func() {
- clusterNew := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "xxx",
"max_wal_size": "1GB",
},
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "80",
"max_wal_size": "1Gb",
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
})
It("doesn't compare default values for min_wal_size and max_wal_size with WalStorage", func() {
- clusterNew := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "100Mi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(clusterNew)).To(BeEmpty())
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"min_wal_size": "1.5GB", // default for max_wal_size is 1GB
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
- clusterNew = Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ clusterNew = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"max_wal_size": "70M", // default for min_wal_size is 80M
},
},
- WalStorage: &StorageConfiguration{
+ WalStorage: &apiv1.StorageConfiguration{
Size: "2Gi",
},
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "4Gi",
},
},
}
- Expect(clusterNew.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1))
})
It("should detect an invalid `shared_buffers` value", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"shared_buffers": "invalid",
},
@@ -964,20 +841,20 @@ var _ = Describe("configuration change validation", func() {
},
}
- Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(cluster)).To(HaveLen(1))
})
It("should reject minimal wal_level when backup is configured", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
- BarmanObjectStore: &BarmanObjectStoreConfiguration{
- BarmanCredentials: BarmanCredentials{
- AWS: &S3Credentials{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
+ BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{
+ BarmanCredentials: apiv1.BarmanCredentials{
+ AWS: &apiv1.S3Credentials{},
},
},
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -986,20 +863,20 @@ var _ = Describe("configuration change validation", func() {
},
}
Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue())
- Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(cluster)).To(HaveLen(1))
})
It("should allow replica wal_level when backup is configured", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
- BarmanObjectStore: &BarmanObjectStoreConfiguration{
- BarmanCredentials: BarmanCredentials{
- AWS: &S3Credentials{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
+ BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{
+ BarmanCredentials: apiv1.BarmanCredentials{
+ AWS: &apiv1.S3Credentials{},
},
},
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "replica",
},
@@ -1007,20 +884,20 @@ var _ = Describe("configuration change validation", func() {
},
}
Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue())
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should allow logical wal_level when backup is configured", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
- BarmanObjectStore: &BarmanObjectStoreConfiguration{
- BarmanCredentials: BarmanCredentials{
- AWS: &S3Credentials{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
+ BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{
+ BarmanCredentials: apiv1.BarmanCredentials{
+ AWS: &apiv1.S3Credentials{},
},
},
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "logical",
},
@@ -1028,14 +905,14 @@ var _ = Describe("configuration change validation", func() {
},
}
Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue())
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should reject minimal wal_level when instances is greater than one", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 2,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -1044,42 +921,42 @@ var _ = Describe("configuration change validation", func() {
},
}
- Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(cluster)).To(HaveLen(1))
})
It("should allow replica wal_level when instances is greater than one", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 2,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "replica",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should allow logical wal_level when instances is greater than one", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 2,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "logical",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should reject an unknown wal_level value", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "test",
},
@@ -1087,19 +964,19 @@ var _ = Describe("configuration change validation", func() {
},
}
- errs := cluster.validateConfiguration()
+ errs := v.validateConfiguration(cluster)
Expect(errs).To(HaveLen(1))
Expect(errs[0].Detail).To(ContainSubstring("unrecognized `wal_level` value - allowed values"))
})
It("should reject minimal if it is a replica cluster", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- ReplicaCluster: &ReplicaClusterConfiguration{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -1108,19 +985,19 @@ var _ = Describe("configuration change validation", func() {
},
}
Expect(cluster.IsReplica()).To(BeTrue())
- Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(cluster)).To(HaveLen(1))
})
It("should allow minimal wal_level with one instance and without archive mode", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -1128,55 +1005,55 @@ var _ = Describe("configuration change validation", func() {
},
},
}
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should disallow minimal wal_level with one instance, without max_wal_senders being specified", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(cluster)).To(HaveLen(1))
})
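Read together, the minimal-wal_level cases above pin down a complete precondition set: wal_level=minimal is accepted only with a single instance, max_wal_senders explicitly set to 0, WAL archiving skipped via the annotation, no Barman backup configured, and not a replica cluster. A condensed sketch of that predicate, inferred from the tests (the helper name and the nil guard are assumptions; the real validator emits one complaint per violated condition):

    // Hypothetical condensation of the constraints asserted above.
    func minimalWALLevelAllowed(c *apiv1.Cluster) bool {
    	return c.Spec.Instances == 1 &&
    		c.Spec.PostgresConfiguration.Parameters["max_wal_senders"] == "0" &&
    		c.Annotations[utils.SkipWalArchiving] == "enabled" &&
    		(c.Spec.Backup == nil || !c.Spec.Backup.IsBarmanBackupConfigured()) &&
    		!c.IsReplica()
    }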
It("should disallow changing wal_level to minimal for existing clusters", func() {
- oldCluster := Cluster{
+ oldCluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"max_wal_senders": "0",
},
},
},
}
- oldCluster.setDefaults(true)
+ oldCluster.Default()
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -1184,19 +1061,19 @@ var _ = Describe("configuration change validation", func() {
},
},
}
- Expect(cluster.validateWALLevelChange(&oldCluster)).To(HaveLen(1))
+ Expect(v.validateWALLevelChange(cluster, oldCluster)).To(HaveLen(1))
})
It("should allow retaining wal_level to minimal for existing clusters", func() {
- oldCluster := Cluster{
+ oldCluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -1204,17 +1081,17 @@ var _ = Describe("configuration change validation", func() {
},
},
}
- oldCluster.setDefaults(true)
+ oldCluster.Default()
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_level": "minimal",
"max_wal_senders": "0",
@@ -1223,147 +1100,152 @@ var _ = Describe("configuration change validation", func() {
},
},
}
- Expect(cluster.validateWALLevelChange(&oldCluster)).To(BeEmpty())
+ Expect(v.validateWALLevelChange(cluster, oldCluster)).To(BeEmpty())
})
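A companion change rides along in the two tests above: oldCluster.setDefaults(true) becomes oldCluster.Default(). Presumably Default() is the exported defaulting entry point on apiv1.Cluster (the mutating counterpart of the validator), so the old cluster is now defaulted through the same path the admission chain uses. A hedged sketch of the assumed relationship:

    // Assumption: Default() wraps the previously private defaulting logic;
    // the actual body in the apiv1 package may differ.
    func (r *Cluster) Default() {
    	r.setDefaults(true)
    }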
Describe("wal_log_hints", func() {
It("should reject wal_log_hints set to an invalid value", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_log_hints": "foo",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ Expect(v.validateConfiguration(cluster)).To(HaveLen(1))
})
It("should allow wal_log_hints set to off for clusters having just one instance", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_log_hints": "off",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should not allow wal_log_hints set to off for clusters having more than one instance", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_log_hints": "off",
},
},
},
}
- Expect(cluster.validateConfiguration()).ToNot(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).ToNot(BeEmpty())
})
It("should allow wal_log_hints set to on for clusters having just one instance", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 1,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_log_hints": "on",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
It("should not allow wal_log_hints set to on for clusters having more than one instance", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.SkipWalArchiving: "enabled",
},
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"wal_log_hints": "true",
},
},
},
}
- Expect(cluster.validateConfiguration()).To(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).To(BeEmpty())
})
})
})
var _ = Describe("validate image name change", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
Context("using image name", func() {
It("doesn't complain with no changes", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{},
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{},
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty())
})
It("complains if it can't upgrade between mayor versions", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:17.0",
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:16.0",
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1))
})
It("doesn't complain if image change is valid", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:17.1",
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:17.0",
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty())
})
})
Context("using image catalog", func() {
It("complains on major upgrades", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1372,9 +1254,9 @@ var _ = Describe("validate image name change", func() {
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1383,19 +1265,19 @@ var _ = Describe("validate image name change", func() {
},
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1))
})
})
Context("changing from imageName to imageCatalogRef", func() {
It("doesn't complain when the major is the same", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:16.1",
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1404,17 +1286,17 @@ var _ = Describe("validate image name change", func() {
},
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty())
})
It("complains on major upgrades", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:16.1",
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1423,15 +1305,15 @@ var _ = Describe("validate image name change", func() {
},
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1))
})
It("complains going from default imageName to different major imageCatalogRef", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{},
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1440,18 +1322,18 @@ var _ = Describe("validate image name change", func() {
},
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1))
})
It("doesn't complain going from default imageName to same major imageCatalogRef", func() {
defaultImageRef := reference.New(versions.DefaultImageName)
version, err := pgversion.FromTag(defaultImageRef.Tag)
Expect(err).ToNot(HaveOccurred())
- clusterOld := Cluster{
- Spec: ClusterSpec{},
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1460,15 +1342,15 @@ var _ = Describe("validate image name change", func() {
},
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty())
})
})
Context("changing from imageCatalogRef to imageName", func() {
It("doesn't complain when the major is the same", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1477,17 +1359,17 @@ var _ = Describe("validate image name change", func() {
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:17.1",
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty())
})
It("complains on major upgrades", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1496,17 +1378,17 @@ var _ = Describe("validate image name change", func() {
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: "postgres:17.1",
},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1))
})
It("complains going from imageCatalogRef to different major default imageName", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1515,19 +1397,19 @@ var _ = Describe("validate image name change", func() {
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{},
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1))
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1))
})
It("doesn't complain going from imageCatalogRef to same major default imageName", func() {
imageNameRef := reference.New(versions.DefaultImageName)
version, err := pgversion.FromTag(imageNameRef.Tag)
Expect(err).ToNot(HaveOccurred())
- clusterOld := Cluster{
- Spec: ClusterSpec{
- ImageCatalogRef: &ImageCatalogRef{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
Name: "test",
Kind: "ImageCatalog",
@@ -1536,21 +1418,26 @@ var _ = Describe("validate image name change", func() {
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{},
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty())
})
})
})
var _ = Describe("recovery target", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("is mutually exclusive", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "",
TargetXID: "",
TargetName: "",
@@ -1564,15 +1451,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
It("Requires BackupID to perform PITR with TargetName", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
BackupID: "20220616T031500",
TargetTLI: "",
TargetXID: "",
@@ -1587,15 +1474,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
It("Fails when no BackupID is provided to perform PITR with TargetXID", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
BackupID: "",
TargetTLI: "",
TargetXID: "1/1",
@@ -1610,15 +1497,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
It("TargetTime's format as `YYYY-MM-DD HH24:MI:SS.FF6TZH` is valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "",
TargetXID: "",
TargetName: "",
@@ -1632,15 +1519,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
It("TargetTime's format as YYYY-MM-DD HH24:MI:SS.FF6TZH:TZM` is valid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "",
TargetXID: "",
TargetName: "",
@@ -1654,15 +1541,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
It("TargetTime's format as YYYY-MM-DD HH24:MI:SS.FF6 TZH:TZM` is invalid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "",
TargetXID: "",
TargetName: "",
@@ -1676,15 +1563,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
It("raises errors for invalid LSN", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "",
TargetXID: "",
TargetName: "",
@@ -1698,15 +1585,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
It("valid LSN", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "",
TargetXID: "",
TargetName: "",
@@ -1720,15 +1607,15 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
It("can be specified", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTime: "2020-01-01 01:01:00",
},
},
@@ -1736,230 +1623,245 @@ var _ = Describe("recovery target", func() {
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
When("recoveryTLI is specified", func() {
It("allows 'latest'", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "latest",
},
},
},
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
It("allows a positive integer", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "23",
},
},
},
},
}
- Expect(cluster.validateRecoveryTarget()).To(BeEmpty())
+ Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty())
})
It("prevents 0 value", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "0",
},
},
},
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
It("prevents negative values", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "-5",
},
},
},
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
It("prevents everything else beside the empty string", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
TargetTLI: "I don't remember",
},
},
},
},
}
- Expect(cluster.validateRecoveryTarget()).To(HaveLen(1))
+ Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1))
})
})
})
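The timeline cases enumerate the accepted grammar exactly: an empty string, the literal "latest", or a positive integer; zero, negative numbers, and free text are rejected. A sketch inferred purely from those expectations (the function name is hypothetical):

    // validTargetTLI mirrors the accepted and rejected inputs tested above.
    func validTargetTLI(tli string) bool {
    	if tli == "" || tli == "latest" {
    		return true
    	}
    	n, err := strconv.Atoi(tli) // import "strconv"
    	return err == nil && n > 0
    }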
var _ = Describe("primary update strategy", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("allows 'unsupervised'", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- PrimaryUpdateStrategy: PrimaryUpdateStrategyUnsupervised,
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised,
Instances: 3,
},
}
- Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty())
+ Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty())
})
It("allows 'supervised'", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- PrimaryUpdateStrategy: PrimaryUpdateStrategySupervised,
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategySupervised,
Instances: 3,
},
}
- Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty())
+ Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty())
})
It("prevents 'supervised' for single-instance clusters", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- PrimaryUpdateStrategy: PrimaryUpdateStrategySupervised,
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategySupervised,
Instances: 1,
},
}
- Expect(cluster.validatePrimaryUpdateStrategy()).ToNot(BeEmpty())
+ Expect(v.validatePrimaryUpdateStrategy(cluster)).ToNot(BeEmpty())
})
It("allows 'unsupervised' for single-instance clusters", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- PrimaryUpdateStrategy: PrimaryUpdateStrategyUnsupervised,
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised,
Instances: 1,
},
}
- Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty())
+ Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty())
})
It("prevents everything else", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
PrimaryUpdateStrategy: "maybe",
Instances: 3,
},
}
- Expect(cluster.validatePrimaryUpdateStrategy()).ToNot(BeEmpty())
+ Expect(v.validatePrimaryUpdateStrategy(cluster)).ToNot(BeEmpty())
})
})
var _ = Describe("Number of synchronous replicas", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
Context("new-style configuration", func() {
It("can't have both new-style configuration and legacy one", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
MinSyncReplicas: 1,
MaxSyncReplicas: 2,
- PostgresConfiguration: PostgresConfiguration{
- Synchronous: &SynchronousReplicaConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Synchronous: &apiv1.SynchronousReplicaConfiguration{
Number: 2,
},
},
},
}
- Expect(cluster.validateConfiguration()).ToNot(BeEmpty())
+ Expect(v.validateConfiguration(cluster)).ToNot(BeEmpty())
})
})
Context("legacy configuration", func() {
It("should be a positive integer", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
MaxSyncReplicas: -3,
},
}
- Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty())
+ Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty())
})
It("should not be equal than the number of replicas", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
MaxSyncReplicas: 3,
},
}
- Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty())
+ Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty())
})
It("should not be greater than the number of replicas", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
MaxSyncReplicas: 5,
},
}
- Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty())
+ Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty())
})
It("can be zero", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
MaxSyncReplicas: 0,
},
}
- Expect(cluster.validateMaxSyncReplicas()).To(BeEmpty())
+ Expect(v.validateMaxSyncReplicas(cluster)).To(BeEmpty())
})
It("can be lower than the number of replicas", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
MaxSyncReplicas: 2,
},
}
- Expect(cluster.validateMaxSyncReplicas()).To(BeEmpty())
+ Expect(v.validateMaxSyncReplicas(cluster)).To(BeEmpty())
})
})
})
var _ = Describe("validateSynchronousReplicaConfiguration", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("returns no error when synchronous configuration is nil", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Synchronous: nil,
},
},
}
- errors := cluster.validateSynchronousReplicaConfiguration()
+ errors := v.validateSynchronousReplicaConfiguration(cluster)
Expect(errors).To(BeEmpty())
})
It("returns an error when number of synchronous replicas is greater than the total instances and standbys", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 2,
- PostgresConfiguration: PostgresConfiguration{
- Synchronous: &SynchronousReplicaConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Synchronous: &apiv1.SynchronousReplicaConfiguration{
Number: 5,
StandbyNamesPost: []string{"standby1"},
StandbyNamesPre: []string{"standby2"},
@@ -1967,7 +1869,7 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() {
},
},
}
- errors := cluster.validateSynchronousReplicaConfiguration()
+ errors := v.validateSynchronousReplicaConfiguration(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(
Equal("Invalid synchronous configuration: the number of synchronous replicas must be less than the " +
@@ -1975,11 +1877,11 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() {
})
It("returns an error when number of synchronous replicas is equal to total instances and standbys", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- PostgresConfiguration: PostgresConfiguration{
- Synchronous: &SynchronousReplicaConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Synchronous: &apiv1.SynchronousReplicaConfiguration{
Number: 5,
StandbyNamesPost: []string{"standby1"},
StandbyNamesPre: []string{"standby2"},
@@ -1987,18 +1889,18 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() {
},
},
}
- errors := cluster.validateSynchronousReplicaConfiguration()
+ errors := v.validateSynchronousReplicaConfiguration(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("Invalid synchronous configuration: the number of synchronous replicas " +
"must be less than the total number of instances and the provided standby names."))
})
It("returns no error when number of synchronous replicas is less than total instances and standbys", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Instances: 2,
- PostgresConfiguration: PostgresConfiguration{
- Synchronous: &SynchronousReplicaConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Synchronous: &apiv1.SynchronousReplicaConfiguration{
Number: 2,
StandbyNamesPost: []string{"standby1"},
StandbyNamesPre: []string{"standby2"},
@@ -2006,37 +1908,42 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() {
},
},
}
- errors := cluster.validateSynchronousReplicaConfiguration()
+ errors := v.validateSynchronousReplicaConfiguration(cluster)
Expect(errors).To(BeEmpty())
})
})
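Taken together, the three expectations fix the rule: the requested number of synchronous replicas must be strictly less than the size of the candidate set, that is, the cluster's instances plus any externally supplied standby names (5 >= 2+1+1 fails, 5 >= 3+1+1 fails, 2 < 2+1+1 passes). A minimal sketch of that inequality; the helper name is an assumption and the real validator may count candidates differently:

    // syncReplicasFit checks the inequality the tests above assert.
    func syncReplicasFit(instances, number int, pre, post []string) bool {
    	return number < instances+len(pre)+len(post)
    }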
var _ = Describe("storage configuration validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains if the size is being reduced", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "1G",
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "512M",
},
},
}
- Expect(clusterNew.validateStorageChange(&clusterOld)).ToNot(BeEmpty())
+ Expect(v.validateStorageChange(clusterNew, clusterOld)).ToNot(BeEmpty())
})
It("does not complain if nothing has been changed", func() {
one := "one"
- clusterOld := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "1G",
StorageClass: &one,
},
@@ -2045,42 +1952,47 @@ var _ = Describe("storage configuration validation", func() {
clusterNew := clusterOld.DeepCopy()
- Expect(clusterNew.validateStorageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateStorageChange(clusterNew, clusterOld)).To(BeEmpty())
})
It("works fine is the size is being enlarged", func() {
- clusterOld := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ clusterOld := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "8G",
},
},
}
- clusterNew := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ clusterNew := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10G",
},
},
}
- Expect(clusterNew.validateStorageChange(&clusterOld)).To(BeEmpty())
+ Expect(v.validateStorageChange(clusterNew, clusterOld)).To(BeEmpty())
})
})
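Storage edits are one-directional: growing the request passes while shrinking is rejected, consistent with a PersistentVolumeClaim not being reducible in place. A sketch of the assumed comparison using apimachinery quantities:

    // Assumed shape of the check: parse both sizes and complain when the
    // new quantity is strictly smaller than the old one.
    // import "k8s.io/apimachinery/pkg/api/resource"
    oldSize := resource.MustParse("1G")
    newSize := resource.MustParse("512M")
    if newSize.Cmp(oldSize) < 0 {
    	// complaint: storage size cannot be decreased
    }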
var _ = Describe("Cluster name validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("should be a valid DNS label", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test.one",
},
}
- Expect(cluster.validateName()).ToNot(BeEmpty())
+ Expect(v.validateName(cluster)).ToNot(BeEmpty())
})
It("should not be too long", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "abcdefghi" +
"abcdefghi" +
@@ -2093,11 +2005,11 @@ var _ = Describe("Cluster name validation", func() {
"abcdefghi",
},
}
- Expect(cluster.validateName()).ToNot(BeEmpty())
+ Expect(v.validateName(cluster)).ToNot(BeEmpty())
})
It("should not raise errors when the name is ok", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "abcdefghi" +
"abcdefghi" +
@@ -2105,47 +2017,52 @@ var _ = Describe("Cluster name validation", func() {
"abcdefghi",
},
}
- Expect(cluster.validateName()).To(BeEmpty())
+ Expect(v.validateName(cluster)).To(BeEmpty())
})
It("should return errors when the name is not DNS-1035 compliant", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "4b96d026-a956-47eb-bae8-a99b840805c3",
},
}
- Expect(cluster.validateName()).NotTo(BeEmpty())
+ Expect(v.validateName(cluster)).NotTo(BeEmpty())
})
It("should return errors when the name length is greater than 50", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: strings.Repeat("toomuchlong", 4) + "-" + "after4times",
},
}
- Expect(cluster.validateName()).NotTo(BeEmpty())
+ Expect(v.validateName(cluster)).NotTo(BeEmpty())
})
It("should return errors when having a name with dots", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "wrong.name",
},
}
- Expect(cluster.validateName()).NotTo(BeEmpty())
+ Expect(v.validateName(cluster)).NotTo(BeEmpty())
})
})
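The name rules exercised here are the stock Kubernetes ones plus a tighter cap: a cluster name must be a DNS-1035 label (start with a letter; lowercase alphanumerics and hyphens only) and stay within 50 characters rather than the usual 63, presumably to leave room for generated suffixes. A sketch of the assumed composition:

    // Assumed: stock DNS-1035 validation plus the 50-character budget the
    // tests above enforce.
    // import "k8s.io/apimachinery/pkg/util/validation"
    func checkClusterName(name string) []string {
    	errs := validation.IsDNS1035Label(name)
    	if len(name) > 50 {
    		errs = append(errs, "name must not exceed 50 characters")
    	}
    	return errs
    }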
var _ = Describe("validation of the list of external clusters", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("is correct when it's empty", func() {
- cluster := Cluster{}
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ cluster := &apiv1.Cluster{}
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
})
It("complains when the list of clusters contains duplicates", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "one",
ConnectionParameters: map[string]string{
@@ -2161,13 +2078,13 @@ var _ = Describe("validation of the list of external clusters", func() {
},
},
}
- Expect(cluster.validateExternalClusters()).ToNot(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).ToNot(BeEmpty())
})
It("should not raise errors is the cluster name is unique", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "one",
ConnectionParameters: map[string]string{
@@ -2183,69 +2100,79 @@ var _ = Describe("validation of the list of external clusters", func() {
},
},
}
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
})
})
var _ = Describe("validation of an external cluster", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("ensure that one of connectionParameters and barmanObjectStore is set", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
{},
},
},
}
- Expect(cluster.validateExternalClusters()).To(Not(BeEmpty()))
+ Expect(v.validateExternalClusters(cluster)).To(Not(BeEmpty()))
cluster.Spec.ExternalClusters[0].ConnectionParameters = map[string]string{
"dbname": "postgres",
}
cluster.Spec.ExternalClusters[0].BarmanObjectStore = nil
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
cluster.Spec.ExternalClusters[0].ConnectionParameters = nil
- cluster.Spec.ExternalClusters[0].BarmanObjectStore = &BarmanObjectStoreConfiguration{}
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ cluster.Spec.ExternalClusters[0].BarmanObjectStore = &apiv1.BarmanObjectStoreConfiguration{}
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
})
})
var _ = Describe("bootstrap base backup validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains if you specify the database name but not the owner for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
Database: "app",
},
},
},
}
- result := cluster.validatePgBaseBackupApplicationDatabase()
+ result := v.validatePgBaseBackupApplicationDatabase(cluster)
Expect(result).To(HaveLen(1))
})
It("complains if you specify the owner but not the database name for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
Owner: "app",
},
},
},
}
- result := cluster.validatePgBaseBackupApplicationDatabase()
+ result := v.validatePgBaseBackupApplicationDatabase(cluster)
Expect(result).To(HaveLen(1))
})
It("doesn't complain if you specify both database name and owner user for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
Database: "app",
Owner: "app",
},
@@ -2253,71 +2180,76 @@ var _ = Describe("bootstrap base backup validation", func() {
},
}
- result := cluster.validatePgBaseBackupApplicationDatabase()
+ result := v.validatePgBaseBackupApplicationDatabase(cluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we are not bootstrapping using pg_basebackup", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{},
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{},
},
}
- result := recoveryCluster.validateBootstrapPgBaseBackupSource()
+ result := v.validateBootstrapPgBaseBackupSource(recoveryCluster)
Expect(result).To(BeEmpty())
})
It("complain when the source cluster doesn't exist", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
Source: "test",
},
},
},
}
- result := recoveryCluster.validateBootstrapPgBaseBackupSource()
+ result := v.validateBootstrapPgBaseBackupSource(recoveryCluster)
Expect(result).ToNot(BeEmpty())
})
})
var _ = Describe("bootstrap recovery validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains if you specify the database name but not the owner for recovery", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Database: "app",
},
},
},
}
- result := cluster.validateRecoveryApplicationDatabase()
+ result := v.validateRecoveryApplicationDatabase(cluster)
Expect(result).To(HaveLen(1))
})
It("complains if you specify the owner but not the database name for recovery", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Owner: "app",
},
},
},
}
- result := cluster.validateRecoveryApplicationDatabase()
+ result := v.validateRecoveryApplicationDatabase(cluster)
Expect(result).To(HaveLen(1))
})
It("doesn't complain if you specify both database name and owner user for recovery", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Database: "app",
Owner: "app",
},
@@ -2325,20 +2257,20 @@ var _ = Describe("bootstrap recovery validation", func() {
},
}
- result := cluster.validateRecoveryApplicationDatabase()
+ result := v.validateRecoveryApplicationDatabase(cluster)
Expect(result).To(BeEmpty())
})
Context("does not complain when bootstrap recovery source matches one of the names of external clusters", func() {
When("using a barman object store configuration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Source: "test",
},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
BarmanObjectStore: &api.BarmanObjectStoreConfiguration{},
@@ -2346,75 +2278,80 @@ var _ = Describe("bootstrap recovery validation", func() {
},
},
}
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
Expect(errorsList).To(BeEmpty())
})
When("using a plugin configuration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Source: "test",
},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
- PluginConfiguration: &PluginConfiguration{},
+ PluginConfiguration: &apiv1.PluginConfiguration{},
},
},
},
}
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
Expect(errorsList).To(BeEmpty())
})
})
It("complains when bootstrap recovery source does not match one of the names of external clusters", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Source: "test",
},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "another-test",
},
},
},
}
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
Expect(errorsList).ToNot(BeEmpty())
})
It("complains when bootstrap recovery source have no BarmanObjectStore nor plugin configuration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Source: "test",
},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
Expect(errorsList).To(HaveLen(1))
})
})
var _ = Describe("toleration validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if we provide a proper toleration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
Tolerations: []corev1.Toleration{
{
Key: "test",
@@ -2425,14 +2362,14 @@ var _ = Describe("toleration validation", func() {
},
},
}
- result := recoveryCluster.validateTolerations()
+ result := v.validateTolerations(recoveryCluster)
Expect(result).To(BeEmpty())
})
It("complain when the toleration ", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
Tolerations: []corev1.Toleration{
{
Key: "",
@@ -2443,111 +2380,119 @@ var _ = Describe("toleration validation", func() {
},
},
}
- result := recoveryCluster.validateTolerations()
+ result := v.validateTolerations(recoveryCluster)
Expect(result).ToNot(BeEmpty())
})
})
var _ = Describe("validate anti-affinity", func() {
- t := true
- f := false
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if we provide an empty affinity section", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{},
},
}
- result := cluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(cluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity enabled", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
PodAntiAffinityType: "required",
},
},
}
- result := cluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(cluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity disabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &f,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(false),
PodAntiAffinityType: "required",
},
},
}
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity enabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
PodAntiAffinityType: "preferred",
},
},
}
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain if we provide a proper PodAntiAffinity default with anti-affinity enabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
PodAntiAffinityType: "",
},
},
}
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
Expect(result).To(BeEmpty())
})
It("complains if we provide a wrong PodAntiAffinity with anti-affinity disabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &f,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(false),
PodAntiAffinityType: "error",
},
},
}
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
Expect(result).NotTo(BeEmpty())
})
It("complains if we provide a wrong PodAntiAffinity with anti-affinity enabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
PodAntiAffinityType: "error",
},
},
}
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
Expect(result).NotTo(BeEmpty())
})
})
var _ = Describe("validation of the list of external clusters", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("is correct when it's empty", func() {
- cluster := Cluster{}
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ cluster := &apiv1.Cluster{}
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
})
It("complains when the list of servers contains duplicates", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "one",
ConnectionParameters: map[string]string{},
@@ -2559,13 +2504,13 @@ var _ = Describe("validation of the list of external clusters", func() {
},
},
}
- Expect(cluster.validateExternalClusters()).ToNot(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).ToNot(BeEmpty())
})
It("should not raise errors is the server name is unique", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "one",
ConnectionParameters: map[string]string{},
@@ -2577,88 +2522,103 @@ var _ = Describe("validation of the list of external clusters", func() {
},
},
}
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
})
})
var _ = Describe("bootstrap base backup validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complain when the source cluster doesn't exist", func() {
- bootstrap := BootstrapConfiguration{}
- bpb := BootstrapPgBaseBackup{Source: "test"}
+ bootstrap := apiv1.BootstrapConfiguration{}
+ bpb := apiv1.BootstrapPgBaseBackup{Source: "test"}
bootstrap.PgBaseBackup = &bpb
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
Source: "test",
},
},
},
}
- result := recoveryCluster.validateBootstrapPgBaseBackupSource()
+ result := v.validateBootstrapPgBaseBackupSource(recoveryCluster)
Expect(result).ToNot(BeEmpty())
})
})
var _ = Describe("unix permissions identifiers change validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains if the PostgresGID is changed", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
- PostgresGID: defaultPostgresGID,
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresGID: apiv1.DefaultPostgresGID,
},
}
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
PostgresGID: 53,
},
}
- Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).NotTo(BeEmpty())
+ Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).NotTo(BeEmpty())
})
It("complains if the PostgresUID is changed", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
- PostgresUID: defaultPostgresUID,
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresUID: apiv1.DefaultPostgresUID,
},
}
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
PostgresGID: 74,
},
}
- Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).NotTo(BeEmpty())
+ Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).NotTo(BeEmpty())
})
It("should not complain if the values havn't been changed", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
PostgresUID: 74,
PostgresGID: 76,
},
}
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
PostgresUID: 74,
PostgresGID: 76,
},
}
- Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).To(BeEmpty())
+ Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).To(BeEmpty())
})
})
var _ = Describe("promotion token validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains if the replica token is not formatted in base64", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(false),
Source: "test",
PromotionToken: "this-is-a-wrong-token",
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
@@ -2666,22 +2626,22 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).ToNot(BeEmpty())
})
It("complains if the replica token is not valid", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(false),
Source: "test",
PromotionToken: base64.StdEncoding.EncodeToString([]byte("{}")),
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
@@ -2689,7 +2649,7 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).ToNot(BeEmpty())
})
@@ -2705,17 +2665,17 @@ var _ = Describe("promotion token validation", func() {
jsonToken, err := json.Marshal(tokenContent)
Expect(err).ToNot(HaveOccurred())
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(false),
Source: "test",
PromotionToken: base64.StdEncoding.EncodeToString(jsonToken),
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
@@ -2723,7 +2683,7 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).To(BeEmpty())
})
@@ -2739,9 +2699,9 @@ var _ = Describe("promotion token validation", func() {
jsonToken, err := json.Marshal(tokenContent)
Expect(err).ToNot(HaveOccurred())
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
PromotionToken: base64.StdEncoding.EncodeToString(jsonToken),
@@ -2749,7 +2709,7 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).NotTo(BeEmpty())
})
@@ -2765,12 +2725,12 @@ var _ = Describe("promotion token validation", func() {
jsonToken, err := json.Marshal(tokenContent)
Expect(err).ToNot(HaveOccurred())
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test2",
},
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Primary: "test",
Source: "test",
PromotionToken: base64.StdEncoding.EncodeToString(jsonToken),
@@ -2778,7 +2738,7 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).NotTo(BeEmpty())
})
@@ -2794,9 +2754,9 @@ var _ = Describe("promotion token validation", func() {
jsonToken, err := json.Marshal(tokenContent)
Expect(err).ToNot(HaveOccurred())
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Primary: "test",
Self: "test2",
Source: "test",
@@ -2805,7 +2765,7 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).NotTo(BeEmpty())
})
@@ -2821,9 +2781,9 @@ var _ = Describe("promotion token validation", func() {
jsonToken, err := json.Marshal(tokenContent)
Expect(err).ToNot(HaveOccurred())
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Primary: "test",
Self: "test",
Source: "test",
@@ -2835,270 +2795,280 @@ var _ = Describe("promotion token validation", func() {
},
}
- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
Expect(result).NotTo(BeEmpty())
})
})
var _ = Describe("replica mode validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains if the bootstrap method is not specified", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- Expect(cluster.validateReplicaMode()).ToNot(BeEmpty())
+ Expect(v.validateReplicaMode(cluster)).ToNot(BeEmpty())
})
It("complains if the initdb bootstrap method is used", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- Expect(cluster.validateReplicaMode()).ToNot(BeEmpty())
+ Expect(v.validateReplicaMode(cluster)).ToNot(BeEmpty())
})
It("doesn't complain about initdb if we enable the external cluster on an existing cluster", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "existing",
},
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- result := cluster.validateReplicaMode()
+ result := v.validateReplicaMode(cluster)
Expect(result).To(BeEmpty())
})
It("should complain if enabled is set to off during a transition", func() {
- old := &Cluster{
+ old := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "existing",
},
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
- Status: ClusterStatus{
- SwitchReplicaClusterStatus: SwitchReplicaClusterStatus{
+ Status: apiv1.ClusterStatus{
+ SwitchReplicaClusterStatus: apiv1.SwitchReplicaClusterStatus{
InProgress: true,
},
},
}
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "existing",
},
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(false),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
- Status: ClusterStatus{
- SwitchReplicaClusterStatus: SwitchReplicaClusterStatus{
+ Status: apiv1.ClusterStatus{
+ SwitchReplicaClusterStatus: apiv1.SwitchReplicaClusterStatus{
InProgress: true,
},
},
}
- result := cluster.validateReplicaClusterChange(old)
+ result := v.validateReplicaClusterChange(cluster, old)
Expect(result).To(HaveLen(1))
Expect(result[0].Type).To(Equal(field.ErrorTypeForbidden))
Expect(result[0].Field).To(Equal("spec.replica.enabled"))
})
It("is valid when the pg_basebackup bootstrap option is used", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- result := cluster.validateReplicaMode()
+ result := v.validateReplicaMode(cluster)
Expect(result).To(BeEmpty())
})
It("is valid when the restore bootstrap option is used", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- result := cluster.validateReplicaMode()
+ result := v.validateReplicaMode(cluster)
Expect(result).To(BeEmpty())
})
It("complains when the primary field is used with the enabled field", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Primary: "toast",
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{},
},
- ExternalClusters: []ExternalCluster{},
+ ExternalClusters: []apiv1.ExternalCluster{},
},
}
- result := cluster.validateReplicaMode()
+ result := v.validateReplicaMode(cluster)
Expect(result).ToNot(BeEmpty())
})
It("doesn't complain when the enabled field is not specified", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-2",
},
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Primary: "test",
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- result := cluster.validateReplicaMode()
+ result := v.validateReplicaMode(cluster)
Expect(result).To(BeEmpty())
})
It("doesn't complain when creating a new primary cluster with the replication stanza set", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Primary: "test",
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
},
},
}
- result := cluster.validateReplicaMode()
+ result := v.validateReplicaMode(cluster)
Expect(result).To(BeEmpty())
})
})
var _ = Describe("validate the replica cluster external clusters", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complains when the external cluster doesn't exist (source)", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Enabled: ptr.To(true),
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{},
},
- ExternalClusters: []ExternalCluster{},
+ ExternalClusters: []apiv1.ExternalCluster{},
},
}
cluster.Spec.Bootstrap.PgBaseBackup = nil
- result := cluster.validateReplicaClusterExternalClusters()
+ result := v.validateReplicaClusterExternalClusters(cluster)
Expect(result).ToNot(BeEmpty())
})
It("complains when the external cluster doesn't exist (primary)", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Primary: "test2",
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
@@ -3106,22 +3076,22 @@ var _ = Describe("validate the replica cluster external clusters", func() {
},
}
- result := cluster.validateReplicaClusterExternalClusters()
+ result := v.validateReplicaClusterExternalClusters(cluster)
Expect(result).ToNot(BeEmpty())
})
It("complains when the external cluster doesn't exist (self)", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
Self: "test2",
Primary: "test",
Source: "test",
},
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{},
},
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
{
Name: "test",
},
@@ -3129,155 +3099,98 @@ var _ = Describe("validate the replica cluster external clusters", func() {
},
}
- result := cluster.validateReplicaClusterExternalClusters()
+ result := v.validateReplicaClusterExternalClusters(cluster)
Expect(result).ToNot(BeEmpty())
})
})
var _ = Describe("Validation changes", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if given old cluster is nil", func() {
- newCluster := &Cluster{}
- err := newCluster.ValidateChanges(nil)
+ newCluster := &apiv1.Cluster{}
+ err := v.validateClusterChanges(newCluster, nil)
Expect(err).To(BeNil())
})
})
var _ = Describe("Backup validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("complain if there's no credentials", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
- BarmanObjectStore: &BarmanObjectStoreConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
+ BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{},
},
},
}
- err := cluster.validateBackupConfiguration()
+ err := v.validateBackupConfiguration(cluster)
Expect(err).To(HaveLen(1))
})
})
var _ = Describe("Backup retention policy validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("doesn't complain if given policy is not provided", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{},
},
}
- err := cluster.validateRetentionPolicy()
+ err := v.validateRetentionPolicy(cluster)
Expect(err).To(BeEmpty())
})
It("doesn't complain if given policy is valid", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
RetentionPolicy: "90d",
},
},
}
- err := cluster.validateRetentionPolicy()
+ err := v.validateRetentionPolicy(cluster)
Expect(err).To(BeEmpty())
})
It("complain if a given policy is not valid", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
RetentionPolicy: "09",
},
},
}
- err := cluster.validateRetentionPolicy()
+ err := v.validateRetentionPolicy(cluster)
Expect(err).To(HaveLen(1))
})
})
-var _ = Describe("Default monitoring queries", func() {
- It("correctly set the default monitoring queries configmap and secret when none is already specified", func() {
- cluster := &Cluster{}
- cluster.defaultMonitoringQueries(&configuration.Data{
- MonitoringQueriesSecret: "test-secret",
- MonitoringQueriesConfigmap: "test-configmap",
- })
- Expect(cluster.Spec.Monitoring).NotTo(BeNil())
- Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
- Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).
- To(ContainElement(ConfigMapKeySelector{
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
- Key: DefaultMonitoringKey,
- }))
- Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
- Expect(cluster.Spec.Monitoring.CustomQueriesSecret).
- To(ContainElement(SecretKeySelector{
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
- Key: DefaultMonitoringKey,
- }))
- })
- testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{
- CustomQueriesConfigMap: []ConfigMapKeySelector{
- {
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
- Key: "test2",
- },
- },
- CustomQueriesSecret: []SecretKeySelector{
- {
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
- Key: "test3",
- },
- },
- }}}
- It("correctly set the default monitoring queries configmap when other metrics are already specified", func() {
- modifiedCluster := testCluster.DeepCopy()
- modifiedCluster.defaultMonitoringQueries(&configuration.Data{
- MonitoringQueriesConfigmap: "test-configmap",
- })
-
- Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
- To(ContainElement(ConfigMapKeySelector{
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
- Key: "test2",
- }))
-
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
- To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret))
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
- To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
- })
- It("correctly set the default monitoring queries secret when other metrics are already specified", func() {
- modifiedCluster := testCluster.DeepCopy()
- modifiedCluster.defaultMonitoringQueries(&configuration.Data{
- MonitoringQueriesSecret: "test-secret",
- })
-
- Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil())
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
- To(ContainElement(SecretKeySelector{
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName},
- Key: "test3",
- }))
-
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).
- To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap))
- Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).
- To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret))
+var _ = Describe("validation of imports", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
})
-})
-var _ = Describe("validation of imports", func() {
It("rejects unrecognized import type", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
+ Import: &apiv1.Import{
Type: "fooBar",
},
},
@@ -3285,19 +3198,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("rejects microservice import with roles", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MicroserviceSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MicroserviceSnapshotType,
Databases: []string{"foo"},
Roles: []string{"bar"},
},
@@ -3306,19 +3219,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("rejects microservice import without exactly one database", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MicroserviceSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MicroserviceSnapshotType,
Databases: []string{"foo", "bar"},
},
},
@@ -3326,19 +3239,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("rejects microservice import with a wildcard on the database name", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MicroserviceSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MicroserviceSnapshotType,
Databases: []string{"*foo"},
},
},
@@ -3346,19 +3259,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("accepts microservice import when well specified", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MicroserviceSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MicroserviceSnapshotType,
Databases: []string{"foo"},
},
},
@@ -3366,19 +3279,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(BeEmpty())
})
It("rejects monolith import with no databases", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MonolithSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MonolithSnapshotType,
Databases: []string{},
},
},
@@ -3386,19 +3299,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("rejects monolith import with PostImport Application SQL", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MonolithSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MonolithSnapshotType,
Databases: []string{"foo"},
PostImportApplicationSQL: []string{"select * from bar"},
},
@@ -3407,19 +3320,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("rejects monolith import with wildcards alongside specific values", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MonolithSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MonolithSnapshotType,
Databases: []string{"bar", "*"},
},
},
@@ -3427,17 +3340,17 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(HaveLen(1))
- cluster = &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MonolithSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MonolithSnapshotType,
Databases: []string{"foo"},
Roles: []string{"baz", "*"},
},
@@ -3446,19 +3359,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result = cluster.validateImport()
+ result = v.validateImport(cluster)
Expect(result).To(HaveLen(1))
})
It("accepts monolith import with proper values", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MonolithSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MonolithSnapshotType,
Databases: []string{"foo"},
},
},
@@ -3466,19 +3379,19 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(BeEmpty())
})
It("accepts monolith import with wildcards", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{
Database: "app",
Owner: "app",
- Import: &Import{
- Type: MonolithSnapshotType,
+ Import: &apiv1.Import{
+ Type: apiv1.MonolithSnapshotType,
Databases: []string{"*"},
Roles: []string{"*"},
},
@@ -3487,18 +3400,23 @@ var _ = Describe("validation of imports", func() {
},
}
- result := cluster.validateImport()
+ result := v.validateImport(cluster)
Expect(result).To(BeEmpty())
})
})
var _ = Describe("validation of replication slots configuration", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("can be enabled on the default PostgreSQL image", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
},
UpdateInterval: 0,
@@ -3507,13 +3425,13 @@ var _ = Describe("validation of replication slots configuration", func() {
}
cluster.Default()
- result := cluster.validateReplicationSlots()
+ result := v.validateReplicationSlots(cluster)
Expect(result).To(BeEmpty())
})
It("set replicationSlots by default", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
},
}
@@ -3522,15 +3440,15 @@ var _ = Describe("validation of replication slots configuration", func() {
Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil())
Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue()))
- result := cluster.validateReplicationSlots()
+ result := v.validateReplicationSlots(cluster)
Expect(result).To(BeEmpty())
})
It("set replicationSlots.highAvailability by default", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
UpdateInterval: 30,
},
},
@@ -3539,16 +3457,16 @@ var _ = Describe("validation of replication slots configuration", func() {
Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil())
Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue()))
- result := cluster.validateReplicationSlots()
+ result := v.validateReplicationSlots(cluster)
Expect(result).To(BeEmpty())
})
It("allows enabling replication slots on the fly", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(false),
},
},
@@ -3557,22 +3475,22 @@ var _ = Describe("validation of replication slots configuration", func() {
oldCluster.Default()
newCluster := oldCluster.DeepCopy()
- newCluster.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ newCluster.Spec.ReplicationSlots = &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
SlotPrefix: "_test_",
},
}
- Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(BeEmpty())
+ Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(BeEmpty())
})
It("prevents changing the slot prefix while replication slots are enabled", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
SlotPrefix: "_test_",
},
@@ -3583,15 +3501,15 @@ var _ = Describe("validation of replication slots configuration", func() {
newCluster := oldCluster.DeepCopy()
newCluster.Spec.ReplicationSlots.HighAvailability.SlotPrefix = "_toast_"
- Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(HaveLen(1))
+ Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(HaveLen(1))
})
It("prevents removing the replication slot section when replication slots are enabled", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
SlotPrefix: "_test_",
},
@@ -3602,15 +3520,15 @@ var _ = Describe("validation of replication slots configuration", func() {
newCluster := oldCluster.DeepCopy()
newCluster.Spec.ReplicationSlots = nil
- Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(HaveLen(1))
+ Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(HaveLen(1))
})
It("allows disabling the replication slots", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
SlotPrefix: "_test_",
},
@@ -3621,55 +3539,60 @@ var _ = Describe("validation of replication slots configuration", func() {
newCluster := oldCluster.DeepCopy()
newCluster.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(false)
- Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(BeEmpty())
+ Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(BeEmpty())
})
It("should return an error when SynchronizeReplicasConfiguration is not nil and has invalid regex", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- SynchronizeReplicas: &SynchronizeReplicasConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{
ExcludePatterns: []string{"([a-zA-Z]+"},
},
},
},
}
- errors := cluster.validateReplicationSlots()
+ errors := v.validateReplicationSlots(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("Cannot configure synchronizeReplicas. Invalid regexes were found"))
})
It("should not return an error when SynchronizeReplicasConfiguration is not nil and regex is valid", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
- SynchronizeReplicas: &SynchronizeReplicasConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{
ExcludePatterns: []string{"validpattern"},
},
},
},
}
- errors := cluster.validateReplicationSlots()
+ errors := v.validateReplicationSlots(cluster)
Expect(errors).To(BeEmpty())
})
It("should not return an error when SynchronizeReplicasConfiguration is nil", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
ImageName: versions.DefaultImageName,
- ReplicationSlots: &ReplicationSlotsConfiguration{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
SynchronizeReplicas: nil,
},
},
}
- errors := cluster.validateReplicationSlots()
+ errors := v.validateReplicationSlots(cluster)
Expect(errors).To(BeEmpty())
})
})
var _ = Describe("Environment variables validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
When("an environment variable is given", func() {
It("detects if it is valid", func() {
Expect(isReservedEnvironmentVariable("PGDATA")).To(BeTrue())
@@ -3682,8 +3605,8 @@ var _ = Describe("Environment variables validation", func() {
When("a ClusterSpec is given", func() {
It("detects if the environment variable list is correct", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Env: []corev1.EnvVar{
{
Name: "TZ",
@@ -3693,12 +3616,12 @@ var _ = Describe("Environment variables validation", func() {
},
}
- Expect(cluster.validateEnv()).To(BeEmpty())
+ Expect(v.validateEnv(cluster)).To(BeEmpty())
})
It("detects if the environment variable list contains a reserved variable", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
Env: []corev1.EnvVar{
{
Name: "TZ",
@@ -3712,37 +3635,42 @@ var _ = Describe("Environment variables validation", func() {
},
}
- Expect(cluster.validateEnv()).To(HaveLen(1))
+ Expect(v.validateEnv(cluster)).To(HaveLen(1))
})
})
})
var _ = Describe("Storage configuration validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
When("a ClusterSpec is given", func() {
It("produces one error if storage is not set at all", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{},
},
}
- Expect(cluster.validateStorageSize()).To(HaveLen(1))
+ Expect(v.validateStorageSize(cluster)).To(HaveLen(1))
})
It("succeeds if storage size is set", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "1G",
},
},
}
- Expect(cluster.validateStorageSize()).To(BeEmpty())
+ Expect(v.validateStorageSize(cluster)).To(BeEmpty())
})
It("succeeds if storage is not set but a pvc template specifies storage", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- StorageConfiguration: StorageConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{
Resources: corev1.VolumeResourceRequirements{
Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")},
@@ -3751,80 +3679,90 @@ var _ = Describe("Storage configuration validation", func() {
},
},
}
- Expect(cluster.validateStorageSize()).To(BeEmpty())
+ Expect(v.validateStorageSize(cluster)).To(BeEmpty())
})
})
})
var _ = Describe("Ephemeral volume configuration validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("succeeds if no ephemeral configuration is present", func() {
- cluster := Cluster{
- Spec: ClusterSpec{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
+ Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
})
It("succeeds if ephemeralVolumeSource is set", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
},
}
- Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
+ Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
})
It("succeeds if ephemeralVolumesSizeLimit.temporaryData is set", func() {
onegi := resource.MustParse("1Gi")
- cluster := Cluster{
- Spec: ClusterSpec{
- EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{
TemporaryData: &onegi,
},
},
}
- Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
+ Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
})
It("succeeds if ephemeralVolumeSource and ephemeralVolumesSizeLimit.shm are set", func() {
onegi := resource.MustParse("1Gi")
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
- EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{
+ EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{
Shm: &onegi,
},
},
}
- Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty())
+ Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty())
})
It("produces one error if conflicting ephemeral storage options are set", func() {
onegi := resource.MustParse("1Gi")
- cluster := Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
EphemeralVolumeSource: &corev1.EphemeralVolumeSource{},
- EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{
+ EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{
TemporaryData: &onegi,
},
},
}
- Expect(cluster.validateEphemeralVolumeSource()).To(HaveLen(1))
+ Expect(v.validateEphemeralVolumeSource(cluster)).To(HaveLen(1))
})
})
var _ = Describe("Role management validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("should succeed if there is no management stanza", func() {
- cluster := Cluster{
- Spec: ClusterSpec{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- Expect(cluster.validateManagedRoles()).To(BeEmpty())
+ Expect(v.validateManagedRoles(cluster)).To(BeEmpty())
})
It("should succeed if the role defined is not reserved", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Roles: []RoleConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Roles: []apiv1.RoleConfiguration{
{
Name: "non-conflicting",
},
@@ -3832,14 +3770,14 @@ var _ = Describe("Role management validation", func() {
},
},
}
- Expect(cluster.validateManagedRoles()).To(BeEmpty())
+ Expect(v.validateManagedRoles(cluster)).To(BeEmpty())
})
It("should produce an error on invalid connection limit", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Roles: []RoleConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Roles: []apiv1.RoleConfiguration{
{
Name: "non-conflicting",
ConnectionLimit: -3,
@@ -3848,14 +3786,14 @@ var _ = Describe("Role management validation", func() {
},
},
}
- Expect(cluster.validateManagedRoles()).To(HaveLen(1))
+ Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
})
It("should produce an error if the role is reserved", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Roles: []RoleConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Roles: []apiv1.RoleConfiguration{
{
Name: "postgres",
},
@@ -3863,14 +3801,14 @@ var _ = Describe("Role management validation", func() {
},
},
}
- Expect(cluster.validateManagedRoles()).To(HaveLen(1))
+ Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
})
It("should produce two errors if the role is reserved and the connection limit is invalid", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Roles: []RoleConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Roles: []apiv1.RoleConfiguration{
{
Name: "postgres",
ConnectionLimit: -3,
@@ -3879,14 +3817,14 @@ var _ = Describe("Role management validation", func() {
},
},
}
- Expect(cluster.validateManagedRoles()).To(HaveLen(2))
+ Expect(v.validateManagedRoles(cluster)).To(HaveLen(2))
})
It("should produce an error if we define two roles with the same name", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Roles: []RoleConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Roles: []apiv1.RoleConfiguration{
{
Name: "my_test",
ConnectionLimit: -1,
@@ -3901,19 +3839,19 @@ var _ = Describe("Role management validation", func() {
},
},
}
- Expect(cluster.validateManagedRoles()).To(HaveLen(1))
+ Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
})
It("should produce an error if we have a password secret AND DisablePassword in a role", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Roles: []RoleConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Roles: []apiv1.RoleConfiguration{
{
Name: "my_test",
Superuser: true,
BypassRLS: true,
DisablePassword: true,
- PasswordSecret: &LocalObjectReference{
+ PasswordSecret: &apiv1.LocalObjectReference{
Name: "myPassword",
},
ConnectionLimit: -1,
@@ -3922,27 +3860,32 @@ var _ = Describe("Role management validation", func() {
},
},
}
- Expect(cluster.validateManagedRoles()).To(HaveLen(1))
+ Expect(v.validateManagedRoles(cluster)).To(HaveLen(1))
})
})
var _ = Describe("Managed Extensions validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("should succeed if no extension is enabled", func() {
- cluster := Cluster{
- Spec: ClusterSpec{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{},
}
- Expect(cluster.validateManagedExtensions()).To(BeEmpty())
+ Expect(v.validateManagedExtensions(cluster)).To(BeEmpty())
})
It("should fail if hot_standby_feedback is set to an invalid value", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
},
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"hot_standby_feedback": "foo",
"pg_failover_slots.synchronize_slot_names": "my_slot",
@@ -3950,18 +3893,18 @@ var _ = Describe("Managed Extensions validation", func() {
},
},
}
- Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2))
+ Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(2))
})
It("should succeed if pg_failover_slots and its prerequisites are enabled", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
},
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"hot_standby_feedback": "on",
"pg_failover_slots.synchronize_slot_names": "my_slot",
@@ -3969,26 +3912,26 @@ var _ = Describe("Managed Extensions validation", func() {
},
},
}
- Expect(cluster.validatePgFailoverSlots()).To(BeEmpty())
+ Expect(v.validatePgFailoverSlots(cluster)).To(BeEmpty())
})
It("should produce two errors if pg_failover_slots is enabled and its prerequisites are disabled", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"pg_failover_slots.synchronize_slot_names": "my_slot",
},
},
},
}
- Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2))
+ Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(2))
})
It("should produce an error if pg_failover_slots is enabled and HA slots are disabled", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"hot_standby_feedback": "yes",
"pg_failover_slots.synchronize_slot_names": "my_slot",
@@ -3996,64 +3939,69 @@ var _ = Describe("Managed Extensions validation", func() {
},
},
}
- Expect(cluster.validatePgFailoverSlots()).To(HaveLen(1))
+ Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1))
})
It("should produce an error if pg_failover_slots is enabled and hot_standby_feedback is disabled", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicationSlots: &ReplicationSlotsConfiguration{
- HighAvailability: &ReplicationSlotsHAConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{
+ HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{
Enabled: ptr.To(true),
},
},
- PostgresConfiguration: PostgresConfiguration{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{
"pg_failover_slots.synchronize_slot_names": "my_slot",
},
},
},
}
- Expect(cluster.validatePgFailoverSlots()).To(HaveLen(1))
+ Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1))
})
})
var _ = Describe("Recovery from volume snapshot validation", func() {
- clusterFromRecovery := func(recovery *BootstrapRecovery) *Cluster {
- return &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
+ clusterFromRecovery := func(recovery *apiv1.BootstrapRecovery) *apiv1.Cluster {
+ return &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
Recovery: recovery,
},
- WalStorage: &StorageConfiguration{},
+ WalStorage: &apiv1.StorageConfiguration{},
},
}
}
It("should produce an error when defining two recovery sources at the same time", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
Source: "sourceName",
- Backup: &BackupSource{},
- VolumeSnapshots: &DataSource{},
+ Backup: &apiv1.BackupSource{},
+ VolumeSnapshots: &apiv1.DataSource{},
},
},
},
}
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1))
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1))
})
It("should produce an error when defining a backupID while recovering using a DataSource", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- RecoveryTarget: &RecoveryTarget{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ RecoveryTarget: &apiv1.RecoveryTarget{
BackupID: "20220616T031500",
},
- VolumeSnapshots: &DataSource{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(""),
Kind: "PersistentVolumeClaim",
@@ -4064,15 +4012,15 @@ var _ = Describe("Recovery from volume snapshot validation", func() {
},
},
}
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1))
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1))
})
It("should produce an error when asking to recovery WALs from a snapshot without having storage for it", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
Kind: "VolumeSnapshot",
@@ -4088,15 +4036,15 @@ var _ = Describe("Recovery from volume snapshot validation", func() {
},
},
}
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1))
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1))
})
It("should not produce an error when the configuration is sound", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
Kind: "VolumeSnapshot",
@@ -4110,49 +4058,49 @@ var _ = Describe("Recovery from volume snapshot validation", func() {
},
},
},
- WalStorage: &StorageConfiguration{},
+ WalStorage: &apiv1.StorageConfiguration{},
},
}
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty())
})
It("accepts recovery from a VolumeSnapshot", func() {
- cluster := clusterFromRecovery(&BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
- Kind: VolumeSnapshotKind,
+ Kind: apiv1.VolumeSnapshotKind,
Name: "pgdata",
},
WalStorage: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
- Kind: VolumeSnapshotKind,
+ Kind: apiv1.VolumeSnapshotKind,
Name: "pgwal",
},
},
})
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty())
})
It("accepts recovery from a VolumeSnapshot, while restoring WALs from an object store", func() {
- cluster := clusterFromRecovery(&BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
- Kind: VolumeSnapshotKind,
+ Kind: apiv1.VolumeSnapshotKind,
Name: "pgdata",
},
},
Source: "pg-cluster",
})
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty())
})
When("using an nil apiGroup", func() {
It("accepts recovery from a PersistentVolumeClaim", func() {
- cluster := clusterFromRecovery(&BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
Kind: "VolumeSnapshot",
@@ -4165,14 +4113,14 @@ var _ = Describe("Recovery from volume snapshot validation", func() {
},
},
})
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty())
})
})
When("using an empty apiGroup", func() {
It("accepts recovery from a PersistentVolumeClaim", func() {
- cluster := clusterFromRecovery(&BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(storagesnapshotv1.GroupName),
Kind: "VolumeSnapshot",
@@ -4185,13 +4133,13 @@ var _ = Describe("Recovery from volume snapshot validation", func() {
},
},
})
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty())
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty())
})
})
It("prevent recovery from other Objects", func() {
- cluster := clusterFromRecovery(&BootstrapRecovery{
- VolumeSnapshots: &DataSource{
+ cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
Storage: corev1.TypedLocalObjectReference{
APIGroup: ptr.To(""),
Kind: "Secret",
@@ -4204,17 +4152,18 @@ var _ = Describe("Recovery from volume snapshot validation", func() {
},
},
})
- Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(2))
+ Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(2))
})
})
var _ = Describe("validateResources", func() {
- var cluster *Cluster
+ var cluster *apiv1.Cluster
+ var v *ClusterCustomValidator
BeforeEach(func() {
- cluster = &Cluster{
- Spec: ClusterSpec{
- PostgresConfiguration: PostgresConfiguration{
+ cluster = &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
Parameters: map[string]string{},
},
Resources: corev1.ResourceRequirements{
@@ -4223,13 +4172,14 @@ var _ = Describe("validateResources", func() {
},
},
}
+ v = &ClusterCustomValidator{}
})
It("returns an error when the CPU request is greater than CPU limit", func() {
cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2")
cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
})
@@ -4238,7 +4188,7 @@ var _ = Describe("validateResources", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("Memory request is greater than the limit"))
})
@@ -4247,7 +4197,7 @@ var _ = Describe("validateResources", func() {
cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("1")
cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
@@ -4255,7 +4205,7 @@ var _ = Describe("validateResources", func() {
cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2")
cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("Ephemeral storage request is greater than the limit"))
})
@@ -4268,7 +4218,7 @@ var _ = Describe("validateResources", func() {
cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2")
cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(3))
Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit"))
@@ -4281,7 +4231,7 @@ var _ = Describe("validateResources", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(2))
Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit"))
Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit"))
@@ -4293,38 +4243,38 @@ var _ = Describe("validateResources", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
cluster.Spec.Resources.Limits["memory"] = resource.MustParse("2Gi")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
It("returns no errors when CPU request is set but limit is nil", func() {
cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("1")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
It("returns no errors when CPU limit is set but request is nil", func() {
cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
It("returns no errors when Memory request is set but limit is nil", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
It("returns no errors when Memory limit is set but request is nil", func() {
cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi")
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
It("returns an error when memoryRequest is less than shared_buffers in kB", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000000kB"
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
})
@@ -4332,7 +4282,7 @@ var _ = Describe("validateResources", func() {
It("returns an error when memoryRequest is less than shared_buffers in MB", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1000Mi")
cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000MB"
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(HaveLen(1))
Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value"))
})
@@ -4340,176 +4290,181 @@ var _ = Describe("validateResources", func() {
It("returns no errors when memoryRequest is greater than or equal to shared_buffers in GB", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi")
cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB"
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
It("returns no errors when shared_buffers is in a format that can't be parsed", func() {
cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi")
cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "invalid_value"
- errors := cluster.validateResources()
+ errors := v.validateResources(cluster)
Expect(errors).To(BeEmpty())
})
})
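
The shared_buffers expectations above rely on normalizing PostgreSQL memory units into Kubernetes quantities before comparing them with the memory request. A minimal sketch of the comparison, assuming the unit normalization (PostgreSQL's 1024-based "MB" mapping onto the Kubernetes "Mi" suffix) happens elsewhere in the operator:

	// Hedged sketch of the check behind "Memory request is lower than
	// PostgreSQL `shared_buffers` value"; names are illustrative.
	request := resource.MustParse("1000Mi")
	sharedBuffers := resource.MustParse("2000Mi") // normalized from "2000MB"
	if request.Cmp(sharedBuffers) < 0 {
		// report the field.Invalid error asserted in the tests above
	}
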
var _ = Describe("Tablespaces validation", func() {
- createFakeTemporaryTbsConf := func(name string) TablespaceConfiguration {
- return TablespaceConfiguration{
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
+ createFakeTemporaryTbsConf := func(name string) apiv1.TablespaceConfiguration {
+ return apiv1.TablespaceConfiguration{
Name: name,
- Storage: StorageConfiguration{
+ Storage: apiv1.StorageConfiguration{
Size: "10Gi",
},
}
}
It("should succeed if there is no tablespaces section", func() {
- cluster := Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
},
}
- Expect(cluster.Validate()).To(BeEmpty())
+ Expect(v.validate(cluster)).To(BeEmpty())
})
It("should succeed if the tablespaces are ok", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("my_tablespace"),
},
},
}
- Expect(cluster.Validate()).To(BeEmpty())
+ Expect(v.validate(cluster)).To(BeEmpty())
})
It("should produce an error if the tablespace name is too long", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
// each repetition is 14 char long, so 5x14 = 70 char > postgres limit
createFakeTemporaryTbsConf("my_tablespace1my_tablespace2my_tablespace3my_tablespace4my_tablespace5"),
},
},
}
- Expect(cluster.Validate()).To(HaveLen(1))
+ Expect(v.validate(cluster)).To(HaveLen(1))
})
It("should produce an error if the tablespace name is reserved by Postgres", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("pg_foo"),
},
},
}
- Expect(cluster.Validate()).To(HaveLen(1))
+ Expect(v.validate(cluster)).To(HaveLen(1))
})
It("should produce an error if the tablespace name is not valid", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
// "my-^&sdf;" contains characters that are not allowed in a tablespace name
createFakeTemporaryTbsConf("my-^&sdf;"),
},
},
}
- Expect(cluster.Validate()).To(HaveLen(1))
+ Expect(v.validate(cluster)).To(HaveLen(1))
})
It("should produce an error if there are duplicate tablespaces", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("my_tablespace"),
createFakeTemporaryTbsConf("my_TAblespace"),
createFakeTemporaryTbsConf("another"),
},
},
}
- Expect(cluster.Validate()).To(HaveLen(1))
+ Expect(v.validate(cluster)).To(HaveLen(1))
})
It("should produce an error if the storage configured for the tablespace is invalid", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
// "10Gibberish" is not a valid storage size
{
Name: "my_tablespace1",
- Storage: StorageConfiguration{
+ Storage: apiv1.StorageConfiguration{
Size: "10Gibberish",
},
},
},
},
}
- Expect(cluster.Validate()).To(HaveLen(1))
+ Expect(v.validate(cluster)).To(HaveLen(1))
})
It("should produce two errors if two tablespaces have errors", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
// "10Gibberish" is not a valid storage size
{
Name: "my_tablespace1",
- Storage: StorageConfiguration{
+ Storage: apiv1.StorageConfiguration{
Size: "10Gibberish",
},
},
@@ -4518,125 +4473,125 @@ var _ = Describe("Tablespaces validation", func() {
},
},
}
- Expect(cluster.Validate()).To(HaveLen(2))
+ Expect(v.validate(cluster)).To(HaveLen(2))
})
It("should produce an error if the tablespaces section is deleted", func() {
- oldCluster := &Cluster{
+ oldCluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("my-tablespace1"),
},
},
}
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
},
}
- Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1))
+ Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1))
})
It("should produce an error if a tablespace is deleted", func() {
- oldCluster := &Cluster{
+ oldCluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("my-tablespace1"),
createFakeTemporaryTbsConf("my-tablespace2"),
},
},
}
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("my-tablespace1"),
},
},
}
- Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1))
+ Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1))
})
It("should produce an error if a tablespace is reduced in size", func() {
- oldCluster := &Cluster{
+ oldCluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
createFakeTemporaryTbsConf("my-tablespace1"),
},
},
}
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
{
Name: "my-tablespace1",
- Storage: StorageConfiguration{
+ Storage: apiv1.StorageConfiguration{
Size: "9Gi",
},
},
},
},
}
- Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1))
+ Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1))
})
It("should not complain when the backup section refers to a tbs that is defined", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
{
Name: "my-tablespace1",
- Storage: StorageConfiguration{
+ Storage: apiv1.StorageConfiguration{
Size: "9Gi",
},
},
},
- Backup: &BackupConfiguration{
- VolumeSnapshot: &VolumeSnapshotConfiguration{
+ Backup: &apiv1.BackupConfiguration{
+ VolumeSnapshot: &apiv1.VolumeSnapshotConfiguration{
TablespaceClassName: map[string]string{
"my-tablespace1": "random",
},
@@ -4644,29 +4599,29 @@ var _ = Describe("Tablespaces validation", func() {
},
},
}
- Expect(cluster.validateTablespaceBackupSnapshot()).To(BeEmpty())
+ Expect(v.validateTablespaceBackupSnapshot(cluster)).To(BeEmpty())
})
It("should complain when the backup section refers to a tbs that is not defined", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
},
- Spec: ClusterSpec{
+ Spec: apiv1.ClusterSpec{
Instances: 3,
- StorageConfiguration: StorageConfiguration{
+ StorageConfiguration: apiv1.StorageConfiguration{
Size: "10Gi",
},
- Tablespaces: []TablespaceConfiguration{
+ Tablespaces: []apiv1.TablespaceConfiguration{
{
Name: "my-tablespace1",
- Storage: StorageConfiguration{
+ Storage: apiv1.StorageConfiguration{
Size: "9Gi",
},
},
},
- Backup: &BackupConfiguration{
- VolumeSnapshot: &VolumeSnapshotConfiguration{
+ Backup: &apiv1.BackupConfiguration{
+ VolumeSnapshot: &apiv1.VolumeSnapshotConfiguration{
TablespaceClassName: map[string]string{
"not-present": "random",
},
@@ -4674,107 +4629,114 @@ var _ = Describe("Tablespaces validation", func() {
},
},
}
- Expect(cluster.validateTablespaceBackupSnapshot()).To(HaveLen(1))
+ Expect(v.validateTablespaceBackupSnapshot(cluster)).To(HaveLen(1))
})
})
var _ = Describe("Validate hibernation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
It("should succeed if hibernation is set to 'on'", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOn),
},
},
}
- Expect(cluster.validateHibernationAnnotation()).To(BeEmpty())
+ Expect(v.validateHibernationAnnotation(cluster)).To(BeEmpty())
})
It("should succeed if hibernation is set to 'off'", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOff),
},
},
}
- Expect(cluster.validateHibernationAnnotation()).To(BeEmpty())
+ Expect(v.validateHibernationAnnotation(cluster)).To(BeEmpty())
})
It("should fail if hibernation is set to an invalid value", func() {
- cluster := &Cluster{
+ cluster := &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
utils.HibernationAnnotationName: "",
},
},
}
- Expect(cluster.validateHibernationAnnotation()).To(HaveLen(1))
+ Expect(v.validateHibernationAnnotation(cluster)).To(HaveLen(1))
})
})
var _ = Describe("validateManagedServices", func() {
- var cluster *Cluster
+ var cluster *apiv1.Cluster
+ var v *ClusterCustomValidator
BeforeEach(func() {
- cluster = &Cluster{
+ cluster = &apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
- Spec: ClusterSpec{
- Managed: &ManagedConfiguration{
- Services: &ManagedServices{
- Additional: []ManagedService{},
+ Spec: apiv1.ClusterSpec{
+ Managed: &apiv1.ManagedConfiguration{
+ Services: &apiv1.ManagedServices{
+ Additional: []apiv1.ManagedService{},
},
},
},
}
+ v = &ClusterCustomValidator{}
})
Context("when Managed or Services is nil", func() {
It("should return no errors", func() {
cluster.Spec.Managed = nil
- Expect(cluster.validateManagedServices()).To(BeNil())
+ Expect(v.validateManagedServices(cluster)).To(BeNil())
- cluster.Spec.Managed = &ManagedConfiguration{}
+ cluster.Spec.Managed = &apiv1.ManagedConfiguration{}
cluster.Spec.Managed.Services = nil
- Expect(cluster.validateManagedServices()).To(BeNil())
+ Expect(v.validateManagedServices(cluster)).To(BeNil())
})
})
Context("when there are no duplicate names", func() {
It("should return no errors", func() {
- cluster.Spec.Managed.Services.Additional = []ManagedService{
+ cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
{
- ServiceTemplate: ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "service1"},
+ ServiceTemplate: apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "service1"},
},
},
{
- ServiceTemplate: ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "service2"},
+ ServiceTemplate: apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "service2"},
},
},
}
- Expect(cluster.validateManagedServices()).To(BeNil())
+ Expect(v.validateManagedServices(cluster)).To(BeNil())
})
})
Context("when there are duplicate names", func() {
It("should return an error", func() {
- cluster.Spec.Managed.Services.Additional = []ManagedService{
+ cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
{
- ServiceTemplate: ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "service1"},
+ ServiceTemplate: apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "service1"},
},
},
{
- ServiceTemplate: ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "service1"},
+ ServiceTemplate: apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "service1"},
},
},
}
- errs := cluster.validateManagedServices()
+ errs := v.validateManagedServices(cluster)
Expect(errs).To(HaveLen(1))
Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
Expect(errs[0].Field).To(Equal("spec.managed.services.additional"))
@@ -4784,14 +4746,14 @@ var _ = Describe("validateManagedServices", func() {
Context("when service template validation fails", func() {
It("should return an error", func() {
- cluster.Spec.Managed.Services.Additional = []ManagedService{
+ cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
{
- ServiceTemplate: ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: ""},
+ ServiceTemplate: apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: ""},
},
},
}
- errs := cluster.validateManagedServices()
+ errs := v.validateManagedServices(cluster)
Expect(errs).To(HaveLen(1))
Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
Expect(errs[0].Field).To(Equal("spec.managed.services.additional[0]"))
@@ -4804,13 +4766,13 @@ var _ = Describe("validateManagedServices", func() {
Expect(err.Field).To(Equal(fmt.Sprintf("spec.managed.services.additional[%d]", index)))
Expect(err.Detail).To(Equal(expectedDetail))
}
- cluster.Spec.Managed.Services.Additional = []ManagedService{
- {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadWriteName()}}},
- {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadName()}}},
- {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadOnlyName()}}},
- {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceAnyName()}}},
+ cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{
+ {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadWriteName()}}},
+ {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadName()}}},
+ {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadOnlyName()}}},
+ {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceAnyName()}}},
}
- errs := cluster.validateManagedServices()
+ errs := v.validateManagedServices(cluster)
Expect(errs).To(HaveLen(4))
assertError("test-rw", 0, errs[0])
assertError("test-r", 1, errs[1])
@@ -4821,19 +4783,19 @@ var _ = Describe("validateManagedServices", func() {
Context("disabledDefault service validation", func() {
It("should allow the disablement of ro and r service", func() {
- cluster.Spec.Managed.Services.DisabledDefaultServices = []ServiceSelectorType{
- ServiceSelectorTypeR,
- ServiceSelectorTypeRO,
+ cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{
+ apiv1.ServiceSelectorTypeR,
+ apiv1.ServiceSelectorTypeRO,
}
- errs := cluster.validateManagedServices()
+ errs := v.validateManagedServices(cluster)
Expect(errs).To(BeEmpty())
})
It("should not allow the disablement of rw service", func() {
- cluster.Spec.Managed.Services.DisabledDefaultServices = []ServiceSelectorType{
- ServiceSelectorTypeRW,
+ cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{
+ apiv1.ServiceSelectorTypeRW,
}
- errs := cluster.validateManagedServices()
+ errs := v.validateManagedServices(cluster)
Expect(errs).To(HaveLen(1))
Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid))
Expect(errs[0].Field).To(Equal("spec.managed.services.disabledDefaultServices"))
@@ -4844,7 +4806,7 @@ var _ = Describe("validateManagedServices", func() {
var _ = Describe("ServiceTemplate Validation", func() {
var (
path *field.Path
- serviceSpecs ServiceTemplateSpec
+ serviceSpecs apiv1.ServiceTemplateSpec
)
BeforeEach(func() {
@@ -4854,8 +4816,8 @@ var _ = Describe("ServiceTemplate Validation", func() {
Describe("validateServiceTemplate", func() {
Context("when name is required", func() {
It("should return an error if the name is empty", func() {
- serviceSpecs = ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: ""},
+ serviceSpecs = apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: ""},
}
errs := validateServiceTemplate(path, true, serviceSpecs)
@@ -4864,8 +4826,8 @@ var _ = Describe("ServiceTemplate Validation", func() {
})
It("should not return an error if the name is present", func() {
- serviceSpecs = ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "valid-name"},
+ serviceSpecs = apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "valid-name"},
}
errs := validateServiceTemplate(path, true, serviceSpecs)
@@ -4875,8 +4837,8 @@ var _ = Describe("ServiceTemplate Validation", func() {
Context("when name is not allowed", func() {
It("should return an error if the name is present", func() {
- serviceSpecs = ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "invalid-name"},
+ serviceSpecs = apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "invalid-name"},
}
errs := validateServiceTemplate(path, false, serviceSpecs)
@@ -4885,8 +4847,8 @@ var _ = Describe("ServiceTemplate Validation", func() {
})
It("should not return an error if the name is empty", func() {
- serviceSpecs = ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: ""},
+ serviceSpecs = apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: ""},
}
errs := validateServiceTemplate(path, false, serviceSpecs)
@@ -4896,8 +4858,8 @@ var _ = Describe("ServiceTemplate Validation", func() {
Context("when selector is present", func() {
It("should return an error if the selector is present", func() {
- serviceSpecs = ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "valid-name"},
+ serviceSpecs = apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "valid-name"},
Spec: corev1.ServiceSpec{
Selector: map[string]string{"app": "test"},
},
@@ -4909,8 +4871,8 @@ var _ = Describe("ServiceTemplate Validation", func() {
})
It("should not return an error if the selector is absent", func() {
- serviceSpecs = ServiceTemplateSpec{
- ObjectMeta: Metadata{Name: "valid-name"},
+ serviceSpecs = apiv1.ServiceTemplateSpec{
+ ObjectMeta: apiv1.Metadata{Name: "valid-name"},
Spec: corev1.ServiceSpec{
Selector: map[string]string{},
},
@@ -4922,61 +4884,3 @@ var _ = Describe("ServiceTemplate Validation", func() {
})
})
})
-
-var _ = Describe("setDefaultPlugins", func() {
- It("adds pre-defined plugins if not already present", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Plugins: []PluginConfiguration{
- {Name: "existing-plugin", Enabled: ptr.To(true)},
- },
- },
- }
- config := &configuration.Data{
- IncludePlugins: "predefined-plugin1,predefined-plugin2",
- }
-
- cluster.setDefaultPlugins(config)
-
- Expect(cluster.Spec.Plugins).To(
- ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)}))
- Expect(cluster.Spec.Plugins).To(
- ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
- Expect(cluster.Spec.Plugins).To(
- ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
- })
-
- It("does not add pre-defined plugins if already present", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Plugins: []PluginConfiguration{
- {Name: "predefined-plugin1", Enabled: ptr.To(false)},
- },
- },
- }
- config := &configuration.Data{
- IncludePlugins: "predefined-plugin1,predefined-plugin2",
- }
-
- cluster.setDefaultPlugins(config)
-
- Expect(cluster.Spec.Plugins).To(HaveLen(2))
- Expect(cluster.Spec.Plugins).To(
- ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)}))
- Expect(cluster.Spec.Plugins).To(
- ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)}))
- })
-
- It("handles empty plugin list gracefully", func() {
- cluster := &Cluster{}
- config := &configuration.Data{
- IncludePlugins: "predefined-plugin1",
- }
-
- cluster.setDefaultPlugins(config)
-
- Expect(cluster.Spec.Plugins).To(HaveLen(1))
- Expect(cluster.Spec.Plugins).To(
- ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)}))
- })
-})
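
All of the rewritten expectations above share the same mechanical change: validation logic that used to live as methods on Cluster is now driven through a stateless ClusterCustomValidator. A hedged sketch of the webhook-level entry point, assuming the cluster validator implements webhook.CustomValidator the same way the Pooler and ScheduledBackup validators added below do:

	// Illustrative only; the tests call the internal validate() and
	// validateClusterChanges() helpers directly instead.
	v := &ClusterCustomValidator{}
	cluster := &apiv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
		Spec: apiv1.ClusterSpec{
			Instances:            3,
			StorageConfiguration: apiv1.StorageConfiguration{Size: "10Gi"},
		},
	}
	warns, err := v.ValidateCreate(context.Background(), cluster)
	// On failure, err wraps the field.ErrorList via apierrors.NewInvalid;
	// warns carries non-blocking admission warnings.
	_, _ = warns, err
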
diff --git a/internal/webhook/v1/doc.go b/internal/webhook/v1/doc.go
new file mode 100644
index 0000000000..8298d1e71a
--- /dev/null
+++ b/internal/webhook/v1/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains the webhooks for the postgresql v1 API group
+package v1
diff --git a/internal/webhook/v1/pooler_webhook.go b/internal/webhook/v1/pooler_webhook.go
new file mode 100644
index 0000000000..5526955a89
--- /dev/null
+++ b/internal/webhook/v1/pooler_webhook.go
@@ -0,0 +1,254 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+// AllowedPgbouncerGenericConfigurationParameters is the list of allowed parameters for PgBouncer
+var AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{
+ "application_name_add_host",
+ "autodb_idle_timeout",
+ "cancel_wait_timeout",
+ "client_idle_timeout",
+ "client_login_timeout",
+ "default_pool_size",
+ "disable_pqexec",
+ "dns_max_ttl",
+ "dns_nxdomain_ttl",
+ "idle_transaction_timeout",
+ "ignore_startup_parameters",
+ "listen_backlog",
+ "log_connections",
+ "log_disconnections",
+ "log_pooler_errors",
+ "log_stats",
+ "max_client_conn",
+ "max_db_connections",
+ "max_packet_size",
+ "max_prepared_statements",
+ "max_user_connections",
+ "min_pool_size",
+ "pkt_buf",
+ "query_timeout",
+ "query_wait_timeout",
+ "reserve_pool_size",
+ "reserve_pool_timeout",
+ "sbuf_loopcnt",
+ "server_check_delay",
+ "server_check_query",
+ "server_connect_timeout",
+ "server_fast_close",
+ "server_idle_timeout",
+ "server_lifetime",
+ "server_login_retry",
+ "server_reset_query",
+ "server_reset_query_always",
+ "server_round_robin",
+ "server_tls_ciphers",
+ "server_tls_protocols",
+ "stats_period",
+ "suspend_timeout",
+ "tcp_defer_accept",
+ "tcp_socket_buffer",
+ "tcp_keepalive",
+ "tcp_keepcnt",
+ "tcp_keepidle",
+ "tcp_keepintvl",
+ "tcp_user_timeout",
+ "track_extra_parameters",
+ "verbose",
+})
+
+// poolerLog is for logging in this package.
+var poolerLog = log.WithName("pooler-resource").WithValues("version", "v1")
+
+// SetupPoolerWebhookWithManager registers the webhook for Pooler in the manager.
+func SetupPoolerWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Pooler{}).
+ WithValidator(&PoolerCustomValidator{}).
+ Complete()
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing the path to an invalid value can cause API server errors when locating the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-pooler,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=poolers,versions=v1,name=vpooler.cnpg.io,sideEffects=None
+
+// PoolerCustomValidator struct is responsible for validating the Pooler resource
+// when it is created, updated, or deleted.
+type PoolerCustomValidator struct{}
+
+var _ webhook.CustomValidator = &PoolerCustomValidator{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Pooler.
+func (v *PoolerCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ pooler, ok := obj.(*apiv1.Pooler)
+ if !ok {
+ return nil, fmt.Errorf("expected a Pooler object but got %T", obj)
+ }
+ poolerLog.Info("Validation for Pooler upon creation", "name", pooler.GetName(), "namespace", pooler.GetNamespace())
+
+ var warns admission.Warnings
+ if !pooler.IsAutomatedIntegration() {
+ poolerLog.Info("Pooler not automatically configured, manual configuration required",
+ "name", pooler.Name, "namespace", pooler.Namespace, "cluster", pooler.Spec.Cluster.Name)
+ warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+
+ "Manually configure it as described in the docs.", pooler.Name, pooler.Spec.Cluster.Name, pooler.Namespace))
+ }
+
+ allErrs := v.validate(pooler)
+
+ if len(allErrs) == 0 {
+ return warns, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"},
+ pooler.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Pooler.
+func (v *PoolerCustomValidator) ValidateUpdate(
+ _ context.Context,
+ oldObj, newObj runtime.Object,
+) (admission.Warnings, error) {
+ pooler, ok := newObj.(*apiv1.Pooler)
+ if !ok {
+ return nil, fmt.Errorf("expected a Pooler object for the newObj but got %T", newObj)
+ }
+
+ oldPooler, ok := oldObj.(*apiv1.Pooler)
+ if !ok {
+ return nil, fmt.Errorf("expected a Pooler object for the oldObj but got %T", oldObj)
+ }
+
+ poolerLog.Info("Validation for Pooler upon update", "name", pooler.GetName(), "namespace", pooler.GetNamespace())
+
+ var warns admission.Warnings
+ if oldPooler.IsAutomatedIntegration() && !pooler.IsAutomatedIntegration() {
+ poolerLog.Info("Pooler not automatically configured, manual configuration required",
+ "name", pooler.Name, "namespace", pooler.Namespace, "cluster", pooler.Spec.Cluster.Name)
+ warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+
+ "Manually configure it as described in the docs.", pooler.Name, pooler.Spec.Cluster.Name, pooler.Namespace))
+ }
+
+ allErrs := v.validate(pooler)
+ if len(allErrs) == 0 {
+ return warns, nil
+ }
+
+ return warns, apierrors.NewInvalid(
+ schema.GroupKind{Group: "pooler.cnpg.io", Kind: "Pooler"},
+ pooler.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Pooler.
+func (v *PoolerCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ pooler, ok := obj.(*apiv1.Pooler)
+ if !ok {
+ return nil, fmt.Errorf("expected a Pooler object but got %T", obj)
+ }
+ poolerLog.Info("Validation for Pooler upon deletion", "name", pooler.GetName(), "namespace", pooler.GetNamespace())
+
+ // TODO(user): fill in your validation logic upon object deletion.
+
+ return nil, nil
+}
+
+func (v *PoolerCustomValidator) validatePgBouncer(r *apiv1.Pooler) field.ErrorList {
+ var result field.ErrorList
+ switch {
+ case r.Spec.PgBouncer == nil:
+ result = append(result,
+ field.Invalid(
+ field.NewPath("spec", "pgbouncer"),
+ "", "required pgbouncer configuration"))
+ case r.Spec.PgBouncer.AuthQuerySecret != nil && r.Spec.PgBouncer.AuthQuerySecret.Name != "" &&
+ r.Spec.PgBouncer.AuthQuery == "":
+ result = append(result,
+ field.Invalid(
+ field.NewPath("spec", "pgbouncer", "authQuery"),
+ "", "must specify an auth query when providing an auth query secret"))
+ case (r.Spec.PgBouncer.AuthQuerySecret == nil || r.Spec.PgBouncer.AuthQuerySecret.Name == "") &&
+ r.Spec.PgBouncer.AuthQuery != "":
+ result = append(result,
+ field.Invalid(
+ field.NewPath("spec", "pgbouncer", "authQuerySecret", "name"),
+ "", "must specify an existing auth query secret when providing an auth query secret"))
+ }
+
+ if r.Spec.PgBouncer != nil && len(r.Spec.PgBouncer.Parameters) > 0 {
+ result = append(result, v.validatePgbouncerGenericParameters(r)...)
+ }
+
+ return result
+}
+
+func (v *PoolerCustomValidator) validateCluster(r *apiv1.Pooler) field.ErrorList {
+ var result field.ErrorList
+ if r.Spec.Cluster.Name == "" {
+ result = append(result,
+ field.Invalid(
+ field.NewPath("spec", "cluster", "name"),
+ "", "must specify a cluster name"))
+ }
+ if r.Spec.Cluster.Name == r.Name {
+ result = append(result,
+ field.Invalid(
+ field.NewPath("metadata", "name"),
+ r.Name, "the pooler resource cannot have the same name of a cluster"))
+ }
+ return result
+}
+
+// validate validates the configuration of a Pooler, returning
+// a list of errors
+func (v *PoolerCustomValidator) validate(r *apiv1.Pooler) (allErrs field.ErrorList) {
+ allErrs = append(allErrs, v.validatePgBouncer(r)...)
+ allErrs = append(allErrs, v.validateCluster(r)...)
+ return allErrs
+}
+
+// validatePgbouncerGenericParameters validates pgbouncer parameters
+func (v *PoolerCustomValidator) validatePgbouncerGenericParameters(r *apiv1.Pooler) field.ErrorList {
+ var result field.ErrorList
+
+ for param := range r.Spec.PgBouncer.Parameters {
+ if !AllowedPgbouncerGenericConfigurationParameters.Has(param) {
+ result = append(result,
+ field.Invalid(
+ field.NewPath("spec", "cluster", "parameters"),
+ param, "Invalid or reserved parameter"))
+ }
+ }
+ return result
+}
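
The Setup*WebhookWithManager helpers in this package are meant to be called from the operator's manager bootstrap. A minimal sketch of that wiring, assuming a standard controller-runtime entry point (the manager construction below is illustrative and not part of this patch):

	// Hypothetical bootstrap code.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}
	if err := SetupPoolerWebhookWithManager(mgr); err != nil {
		panic(err)
	}
	if err := SetupScheduledBackupWebhookWithManager(mgr); err != nil {
		panic(err)
	}
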
diff --git a/api/v1/pooler_webhook_test.go b/internal/webhook/v1/pooler_webhook_test.go
similarity index 52%
rename from api/v1/pooler_webhook_test.go
rename to internal/webhook/v1/pooler_webhook_test.go
index a1791248c9..c49da31e18 100644
--- a/api/v1/pooler_webhook_test.go
+++ b/internal/webhook/v1/pooler_webhook_test.go
@@ -19,113 +19,120 @@ package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Pooler validation", func() {
+ var v *PoolerCustomValidator
+ BeforeEach(func() {
+ v = &PoolerCustomValidator{}
+ })
+
It("doesn't allow specifying authQuerySecret without any authQuery", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- PgBouncer: &PgBouncerSpec{
- AuthQuerySecret: &LocalObjectReference{
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ PgBouncer: &apiv1.PgBouncerSpec{
+ AuthQuerySecret: &apiv1.LocalObjectReference{
Name: "test",
},
},
},
}
- Expect(pooler.validatePgBouncer()).NotTo(BeEmpty())
+ Expect(v.validatePgBouncer(pooler)).NotTo(BeEmpty())
})
It("doesn't allow specifying authQuery without any authQuerySecret", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- PgBouncer: &PgBouncerSpec{
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ PgBouncer: &apiv1.PgBouncerSpec{
AuthQuery: "test",
},
},
}
- Expect(pooler.validatePgBouncer()).NotTo(BeEmpty())
+ Expect(v.validatePgBouncer(pooler)).NotTo(BeEmpty())
})
It("allows having both authQuery and authQuerySecret", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- PgBouncer: &PgBouncerSpec{
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ PgBouncer: &apiv1.PgBouncerSpec{
AuthQuery: "test",
- AuthQuerySecret: &LocalObjectReference{
+ AuthQuerySecret: &apiv1.LocalObjectReference{
Name: "test",
},
},
},
}
- Expect(pooler.validatePgBouncer()).To(BeEmpty())
+ Expect(v.validatePgBouncer(pooler)).To(BeEmpty())
})
It("allows the autoconfiguration mode", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- PgBouncer: &PgBouncerSpec{},
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ PgBouncer: &apiv1.PgBouncerSpec{},
},
}
- Expect(pooler.validatePgBouncer()).To(BeEmpty())
+ Expect(v.validatePgBouncer(pooler)).To(BeEmpty())
})
It("doesn't allow not specifying a cluster name", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- Cluster: LocalObjectReference{Name: ""},
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ Cluster: apiv1.LocalObjectReference{Name: ""},
},
}
- Expect(pooler.validateCluster()).NotTo(BeEmpty())
+ Expect(v.validateCluster(pooler)).NotTo(BeEmpty())
})
It("doesn't allow to have a pooler with the same name of the cluster", func() {
- pooler := Pooler{
+ pooler := &apiv1.Pooler{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
- Spec: PoolerSpec{
- Cluster: LocalObjectReference{
+ Spec: apiv1.PoolerSpec{
+ Cluster: apiv1.LocalObjectReference{
Name: "test",
},
},
}
- Expect(pooler.validateCluster()).NotTo(BeEmpty())
+ Expect(v.validateCluster(pooler)).NotTo(BeEmpty())
})
It("doesn't complain when specifying a cluster name", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- Cluster: LocalObjectReference{Name: "cluster-example"},
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ Cluster: apiv1.LocalObjectReference{Name: "cluster-example"},
},
}
- Expect(pooler.validateCluster()).To(BeEmpty())
+ Expect(v.validateCluster(pooler)).To(BeEmpty())
})
It("does complain when given a fixed parameter", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- PgBouncer: &PgBouncerSpec{
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ PgBouncer: &apiv1.PgBouncerSpec{
Parameters: map[string]string{"pool_mode": "test"},
},
},
}
- Expect(pooler.validatePgbouncerGenericParameters()).NotTo(BeEmpty())
+ Expect(v.validatePgbouncerGenericParameters(pooler)).NotTo(BeEmpty())
})
It("does not complain when given a valid parameter", func() {
- pooler := Pooler{
- Spec: PoolerSpec{
- PgBouncer: &PgBouncerSpec{
+ pooler := &apiv1.Pooler{
+ Spec: apiv1.PoolerSpec{
+ PgBouncer: &apiv1.PgBouncerSpec{
Parameters: map[string]string{"verbose": "10"},
},
},
}
- Expect(pooler.validatePgbouncerGenericParameters()).To(BeEmpty())
+ Expect(v.validatePgbouncerGenericParameters(pooler)).To(BeEmpty())
})
})
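
Note that the update path surfaces a warning rather than an error when a Pooler moves away from the automated integration. A hedged sketch of that behavior, assuming IsAutomatedIntegration() reports false once a custom authQuery/authQuerySecret pair is configured (which is what the ValidateUpdate logic above implies):

	oldPooler := &apiv1.Pooler{
		ObjectMeta: metav1.ObjectMeta{Name: "pooler-example"},
		Spec: apiv1.PoolerSpec{
			Cluster:   apiv1.LocalObjectReference{Name: "cluster-example"},
			PgBouncer: &apiv1.PgBouncerSpec{},
		},
	}
	newPooler := oldPooler.DeepCopy()
	newPooler.Spec.PgBouncer.AuthQuery = "SELECT usename, passwd FROM user_search($1)"
	newPooler.Spec.PgBouncer.AuthQuerySecret = &apiv1.LocalObjectReference{Name: "custom-auth"}

	v := &PoolerCustomValidator{}
	warns, err := v.ValidateUpdate(context.Background(), oldPooler, newPooler)
	// err stays nil (the new spec is still valid); warns carries the
	// "manual configuration required" admission warning.
	_, _ = warns, err
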
diff --git a/internal/webhook/v1/scheduledbackup_webhook.go b/internal/webhook/v1/scheduledbackup_webhook.go
new file mode 100644
index 0000000000..fdf6ccdbf3
--- /dev/null
+++ b/internal/webhook/v1/scheduledbackup_webhook.go
@@ -0,0 +1,190 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/robfig/cron"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// scheduledBackupLog is for logging in this package.
+var scheduledBackupLog = log.WithName("scheduledbackup-resource").WithValues("version", "v1")
+
+// SetupScheduledBackupWebhookWithManager registers the webhook for ScheduledBackup in the manager.
+func SetupScheduledBackupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.ScheduledBackup{}).
+ WithValidator(&ScheduledBackupCustomValidator{}).
+ WithDefaulter(&ScheduledBackupCustomDefaulter{}).
+ Complete()
+}
+
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-scheduledbackup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,verbs=create;update,versions=v1,name=mscheduledbackup.cnpg.io,sideEffects=None
+
+// ScheduledBackupCustomDefaulter struct is responsible for setting default values on the custom resource of the
+// Kind ScheduledBackup when those are created or updated.
+type ScheduledBackupCustomDefaulter struct{}
+
+var _ webhook.CustomDefaulter = &ScheduledBackupCustomDefaulter{}
+
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind ScheduledBackup.
+func (d *ScheduledBackupCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
+ scheduledBackup, ok := obj.(*apiv1.ScheduledBackup)
+ if !ok {
+ return fmt.Errorf("expected an ScheduledBackup object but got %T", obj)
+ }
+ scheduledBackupLog.Info("Defaulting for ScheduledBackup",
+ "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace())
+
+ // TODO(user): fill in your defaulting logic.
+
+ return nil
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing the path to an invalid value can cause API server errors when locating the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-scheduledbackup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,versions=v1,name=vscheduledbackup.cnpg.io,sideEffects=None
+
+// ScheduledBackupCustomValidator struct is responsible for validating the ScheduledBackup resource
+// when it is created, updated, or deleted.
+type ScheduledBackupCustomValidator struct {
+ // TODO(user): Add more fields as needed for validation
+}
+
+var _ webhook.CustomValidator = &ScheduledBackupCustomValidator{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup.
+func (v *ScheduledBackupCustomValidator) ValidateCreate(
+ _ context.Context,
+ obj runtime.Object,
+) (admission.Warnings, error) {
+ scheduledBackup, ok := obj.(*apiv1.ScheduledBackup)
+ if !ok {
+ return nil, fmt.Errorf("expected a ScheduledBackup object but got %T", obj)
+ }
+ scheduledBackupLog.Info("Validation for ScheduledBackup upon creation",
+ "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace())
+
+ warnings, allErrs := v.validate(scheduledBackup)
+ if len(allErrs) == 0 {
+ return warnings, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "ScheduledBackup"},
+ scheduledBackup.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup.
+func (v *ScheduledBackupCustomValidator) ValidateUpdate(
+ _ context.Context,
+ _, newObj runtime.Object,
+) (admission.Warnings, error) {
+ scheduledBackup, ok := newObj.(*apiv1.ScheduledBackup)
+ if !ok {
+ return nil, fmt.Errorf("expected a ScheduledBackup object for the newObj but got %T", newObj)
+ }
+ scheduledBackupLog.Info("Validation for ScheduledBackup upon update",
+ "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace())
+
+ warnings, allErrs := v.validate(scheduledBackup)
+ if len(allErrs) == 0 {
+ return warnings, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ schema.GroupKind{Group: "scheduledBackup.cnpg.io", Kind: "ScheduledBackup"},
+ scheduledBackup.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup.
+func (v *ScheduledBackupCustomValidator) ValidateDelete(
+ _ context.Context,
+ obj runtime.Object,
+) (admission.Warnings, error) {
+ scheduledBackup, ok := obj.(*apiv1.ScheduledBackup)
+ if !ok {
+ return nil, fmt.Errorf("expected a ScheduledBackup object but got %T", obj)
+ }
+ scheduledBackupLog.Info("Validation for ScheduledBackup upon deletion",
+ "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace())
+
+ // TODO(user): fill in your validation logic upon object deletion.
+
+ return nil, nil
+}
+
+func (v *ScheduledBackupCustomValidator) validate(r *apiv1.ScheduledBackup) (admission.Warnings, field.ErrorList) {
+ var result field.ErrorList
+ var warnings admission.Warnings
+
+ if _, err := cron.Parse(r.GetSchedule()); err != nil {
+ result = append(result,
+ field.Invalid(
+ field.NewPath("spec", "schedule"),
+ r.Spec.Schedule, err.Error()))
+ } else if len(strings.Fields(r.Spec.Schedule)) != 6 {
+ warnings = append(
+ warnings,
+ "Schedule parameter may not have the right number of arguments "+
+ "(usually six arguments are needed)",
+ )
+ }
+
+ if r.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "method"),
+ r.Spec.Method,
+ "Cannot use volumeSnapshot backup method due to missing "+
+ "VolumeSnapshot CRD. If you installed the CRD after having "+
+ "started the operator, please restart it to enable "+
+ "VolumeSnapshot support",
+ ))
+ }
+
+ if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.Online != nil {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "online"),
+ r.Spec.Online,
+ "Online parameter can be specified only if the method is volumeSnapshot",
+ ))
+ }
+
+ if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "onlineConfiguration"),
+ r.Spec.OnlineConfiguration,
+ "OnlineConfiguration parameter can be specified only if the method is volumeSnapshot",
+ ))
+ }
+
+ return warnings, result
+}
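For reference, the six-field schedule convention that `validate` warns about can be reproduced with a seconds-aware cron parser. A minimal sketch, assuming a robfig/cron v3-style parser (the operator's actual parser configuration may differ):

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	// Seconds-aware parser: six fields, with the day-of-week field optional.
	parser := cron.NewParser(
		cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.DowOptional,
	)

	for _, spec := range []string{
		"0 0 0 * * *",      // six fields: parses cleanly
		"1 2 3 4 5",        // five fields: parses, but the webhook warns
		"0 0 0 * * * 1996", // seven fields: parse error
	} {
		_, err := parser.Parse(spec)
		fmt.Printf("%q -> err=%v\n", spec, err)
	}
}
```

A five-field spec still parses (the trailing field is treated as optional), which is why the webhook emits a warning rather than a validation error.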
diff --git a/internal/webhook/v1/scheduledbackup_webhook_test.go b/internal/webhook/v1/scheduledbackup_webhook_test.go
new file mode 100644
index 0000000000..173df06ebc
--- /dev/null
+++ b/internal/webhook/v1/scheduledbackup_webhook_test.go
@@ -0,0 +1,126 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/utils/ptr"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Validate schedule", func() {
+ var v *ScheduledBackupCustomValidator
+ BeforeEach(func() {
+ v = &ScheduledBackupCustomValidator{}
+ })
+
+ It("doesn't complain if there's a schedule", func() {
+ schedule := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Schedule: "0 0 0 * * *",
+ },
+ }
+
+ warnings, result := v.validate(schedule)
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(BeEmpty())
+ })
+
+ It("warn the user if the schedule has a wrong number of arguments", func() {
+ schedule := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Schedule: "1 2 3 4 5",
+ },
+ }
+
+ warnings, result := v.validate(schedule)
+ Expect(warnings).To(HaveLen(1))
+ Expect(result).To(BeEmpty())
+ })
+
+ It("complain with a wrong time", func() {
+ schedule := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Schedule: "0 0 0 * * * 1996",
+ },
+ }
+
+ warnings, result := v.validate(schedule)
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(HaveLen(1))
+ })
+
+ It("doesn't complain if VolumeSnapshot CRD is present", func() {
+ schedule := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Schedule: "0 0 0 * * *",
+ Method: apiv1.BackupMethodVolumeSnapshot,
+ },
+ }
+ utils.SetVolumeSnapshot(true)
+
+ warnings, result := v.validate(schedule)
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(BeEmpty())
+ })
+
+ It("complains if VolumeSnapshot CRD is not present", func() {
+ schedule := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Schedule: "0 0 0 * * *",
+ Method: apiv1.BackupMethodVolumeSnapshot,
+ },
+ }
+ utils.SetVolumeSnapshot(false)
+ warnings, result := v.validate(schedule)
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(HaveLen(1))
+ Expect(result[0].Field).To(Equal("spec.method"))
+ })
+
+ It("complains if online is set on a barman backup", func() {
+ scheduledBackup := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Method: apiv1.BackupMethodBarmanObjectStore,
+ Online: ptr.To(true),
+ Schedule: "* * * * * *",
+ },
+ }
+ warnings, result := v.validate(scheduledBackup)
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(HaveLen(1))
+ Expect(result[0].Field).To(Equal("spec.online"))
+ })
+
+ It("complains if onlineConfiguration is set on a barman backup", func() {
+ scheduledBackup := &apiv1.ScheduledBackup{
+ Spec: apiv1.ScheduledBackupSpec{
+ Method: apiv1.BackupMethodBarmanObjectStore,
+ OnlineConfiguration: &apiv1.OnlineConfiguration{},
+ Schedule: "* * * * * *",
+ },
+ }
+ warnings, result := v.validate(scheduledBackup)
+ Expect(warnings).To(BeEmpty())
+ Expect(result).To(HaveLen(1))
+ Expect(result[0].Field).To(Equal("spec.onlineConfiguration"))
+ })
+})
diff --git a/internal/webhook/v1/suite_test.go b/internal/webhook/v1/suite_test.go
new file mode 100644
index 0000000000..5bd0c55f58
--- /dev/null
+++ b/internal/webhook/v1/suite_test.go
@@ -0,0 +1,30 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestAPIs(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "Webhook Suite")
+}
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 2c4806bc06..200bc57248 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -41,6 +41,7 @@ import (
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ webhookv1 "github.com/cloudnative-pg/cloudnative-pg/internal/webhook/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -320,7 +321,6 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env
func AssertClusterDefault(
namespace string,
clusterName string,
- isExpectedToDefault bool,
env *environment.TestingEnvironment,
) {
By("having a Cluster object populated with default values", func() {
@@ -335,12 +335,10 @@ func AssertClusterDefault(
g.Expect(err).ToNot(HaveOccurred())
}).Should(Succeed())
- validationErr := cluster.Validate()
- if isExpectedToDefault {
- Expect(validationErr).Should(BeEmpty(), validationErr)
- } else {
- Expect(validationErr).ShouldNot(BeEmpty(), validationErr)
- }
+ validator := webhookv1.ClusterCustomValidator{}
+ validationWarn, validationErr := validator.ValidateCreate(env.Ctx, cluster)
+ Expect(validationWarn).To(BeEmpty())
+ Expect(validationErr).ToNot(HaveOccurred())
})
}
diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go
index 2171638e14..e0d496f0b2 100644
--- a/tests/e2e/webhook_test.go
+++ b/tests/e2e/webhook_test.go
@@ -48,7 +48,6 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
)
var webhookNamespace, clusterName string
- var clusterIsDefaulted bool
var err error
BeforeEach(func() {
@@ -64,7 +63,6 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
It("checks if webhook works as expected", func() {
webhookNamespacePrefix := "webhook-test"
- clusterIsDefaulted = true
By("having a deployment for the operator in state ready", func() {
// Make sure that we have at least one operator already working
err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1)
@@ -80,12 +78,11 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env)
// Check if cluster is ready and the default values are populated
- AssertClusterDefault(webhookNamespace, clusterName, clusterIsDefaulted, env)
+ AssertClusterDefault(webhookNamespace, clusterName, env)
})
It("Does not crash the operator when disabled", func() {
webhookNamespacePrefix := "no-webhook-test"
- clusterIsDefaulted = true
mWebhook, admissionNumber, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutatingWebhook)
Expect(err).ToNot(HaveOccurred())
@@ -120,7 +117,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env)
// Check if cluster is ready and has no default value in the object
- AssertClusterDefault(webhookNamespace, clusterName, clusterIsDefaulted, env)
+ AssertClusterDefault(webhookNamespace, clusterName, env)
// Make sure the operator is intact and not crashing
By("having a deployment for the operator in state ready", func() {
From e5ad1e0f41689cfd2f8437acb27aeff8d09eea3a Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 10 Jan 2025 10:39:31 +0100
Subject: [PATCH 300/836] chore(deps): update spellcheck to v0.46.0 (main)
(#6539)
---
.github/workflows/spellcheck.yml | 2 +-
Makefile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
index 1fd12f3085..27de6b2c8e 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
uses: actions/checkout@v4
- name: Spellcheck
- uses: rojopolis/spellcheck-github-actions@0.45.0
+ uses: rojopolis/spellcheck-github-actions@0.46.0
diff --git a/Makefile b/Makefile
index d08b303300..6f7e280063 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
GORELEASER_VERSION ?= v2.5.1
-SPELLCHECK_VERSION ?= 0.45.0
+SPELLCHECK_VERSION ?= 0.46.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.38.0
OPM_VERSION ?= v1.49.0
From a0800ca858b4ad260c8a9fc965b482f445090a6a Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 10 Jan 2025 11:08:37 +0100
Subject: [PATCH 301/836] chore(deps): update
agilepathway/pull-request-label-checker docker tag to v1.6.61 (main) (#6553)
---
.github/workflows/require-labels.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml
index 1bbbfb1d23..1bb64dfd17 100644
--- a/.github/workflows/require-labels.yml
+++ b/.github/workflows/require-labels.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-24.04
steps:
- name: Require labels
- uses: docker://agilepathway/pull-request-label-checker:v1.6.60
+ uses: docker://agilepathway/pull-request-label-checker:v1.6.61
with:
any_of: "ok to merge :ok_hand:"
none_of: "do not merge"
From c764308547f834df1fc2f05c32b462b05af9d4be Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 11 Jan 2025 10:43:19 +0100
Subject: [PATCH 302/836] chore(deps): update dependency
kubernetes-csi/external-resizer to v1.13.0 (main) (#6558)
---
hack/setup-cluster.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index 7bd5e66072..3d4ee152dc 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -29,7 +29,7 @@ K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
EXTERNAL_PROVISIONER_VERSION=v5.1.0
-EXTERNAL_RESIZER_VERSION=v1.12.0
+EXTERNAL_RESIZER_VERSION=v1.13.0
EXTERNAL_ATTACHER_VERSION=v4.8.0
K8S_VERSION=${K8S_VERSION-}
KUBECTL_VERSION=${KUBECTL_VERSION-}
From 3d52abbc0c16762dd5716fcb52cc3fa02fc629fc Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 11 Jan 2025 13:18:36 +0100
Subject: [PATCH 303/836] fix(deps): update module
sigs.k8s.io/controller-runtime to v0.19.4 (main) (#6538)
---
go.mod | 3 ++-
go.sum | 6 ++++--
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 5058c8e429..508e56fbe8 100644
--- a/go.mod
+++ b/go.mod
@@ -46,7 +46,7 @@ require (
k8s.io/cli-runtime v0.32.0
k8s.io/client-go v0.32.0
k8s.io/utils v0.0.0-20241210054802-24370beab758
- sigs.k8s.io/controller-runtime v0.19.3
+ sigs.k8s.io/controller-runtime v0.19.4
sigs.k8s.io/yaml v1.4.0
)
@@ -57,6 +57,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/fatih/color v1.17.0 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
diff --git a/go.sum b/go.sum
index ff99792126..1c0c76ec97 100644
--- a/go.sum
+++ b/go.sum
@@ -39,6 +39,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
@@ -295,8 +297,8 @@ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJ
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
-sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
+sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo=
+sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
From 4a150f42a7ffd6f09c099017a7058f54019a2364 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 11 Jan 2025 21:29:42 +0100
Subject: [PATCH 304/836] chore(deps): update dependency
kubernetes-csi/external-resizer to v1.13.1 (main) (#6573)
---
hack/setup-cluster.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index 3d4ee152dc..a6c9c29f9e 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -29,7 +29,7 @@ K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
EXTERNAL_PROVISIONER_VERSION=v5.1.0
-EXTERNAL_RESIZER_VERSION=v1.13.0
+EXTERNAL_RESIZER_VERSION=v1.13.1
EXTERNAL_ATTACHER_VERSION=v4.8.0
K8S_VERSION=${K8S_VERSION-}
KUBECTL_VERSION=${KUBECTL_VERSION-}
From 25e061312c472b7406c901d1adae33771eb4d515 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 13 Jan 2025 09:38:57 +0100
Subject: [PATCH 305/836] chore(deps): update operator framework to v1.39.0
(main) (#6581)
---
Makefile | 2 +-
config/olm-scorecard/patches/basic.config.yaml | 2 +-
config/olm-scorecard/patches/olm.config.yaml | 10 +++++-----
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/Makefile b/Makefile
index 6f7e280063..c71f2e7f95 100644
--- a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ CONTROLLER_TOOLS_VERSION ?= v0.16.5
GORELEASER_VERSION ?= v2.5.1
SPELLCHECK_VERSION ?= 0.46.0
WOKE_VERSION ?= 0.19.0
-OPERATOR_SDK_VERSION ?= v1.38.0
+OPERATOR_SDK_VERSION ?= v1.39.0
OPM_VERSION ?= v1.49.0
PREFLIGHT_VERSION ?= 1.11.1
OPENSHIFT_VERSIONS ?= v4.12-v4.18
diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml
index 84683cf8d7..b89ce3bf90 100644
--- a/config/olm-scorecard/patches/basic.config.yaml
+++ b/config/olm-scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- basic-check-spec
- image: quay.io/operator-framework/scorecard-test:v1.38.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.0
labels:
suite: basic
test: basic-check-spec-test
diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml
index 43f40a8b3f..7eff5c9099 100644
--- a/config/olm-scorecard/patches/olm.config.yaml
+++ b/config/olm-scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- olm-bundle-validation
- image: quay.io/operator-framework/scorecard-test:v1.38.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.0
labels:
suite: olm
test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-validation
- image: quay.io/operator-framework/scorecard-test:v1.38.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.0
labels:
suite: olm
test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-resources
- image: quay.io/operator-framework/scorecard-test:v1.38.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.0
labels:
suite: olm
test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
entrypoint:
- scorecard-test
- olm-spec-descriptors
- image: quay.io/operator-framework/scorecard-test:v1.38.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.0
labels:
suite: olm
test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
entrypoint:
- scorecard-test
- olm-status-descriptors
- image: quay.io/operator-framework/scorecard-test:v1.38.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.0
labels:
suite: olm
test: olm-status-descriptors-test
From 347558ee3dffb8a69f136dd6fafa1090554b21d7 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 13 Jan 2025 11:49:45 +0100
Subject: [PATCH 306/836] chore(deps): update dependency vmware-tanzu/velero to
v1.15.1 (main) (#6580)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 6ccdc1b64b..96260e749b 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1343,7 +1343,7 @@ jobs:
name: Setup Velero
uses: nick-fields/retry@v3
env:
- VELERO_VERSION: "v1.15.0"
+ VELERO_VERSION: "v1.15.1"
VELERO_AWS_PLUGIN_VERSION: "v1.11.0"
with:
timeout_minutes: 10
From 6f780d35c3c094f0bf57482568dd4377c41c3e15 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 20:51:55 +0100
Subject: [PATCH 307/836] fix(deps): update module google.golang.org/grpc to
v1.69.4 (main) (#6587)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 508e56fbe8..7ec79071cc 100644
--- a/go.mod
+++ b/go.mod
@@ -38,7 +38,7 @@ require (
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/term v0.28.0
- google.golang.org/grpc v1.69.2
+ google.golang.org/grpc v1.69.4
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.0
k8s.io/apiextensions-apiserver v0.32.0
diff --git a/go.sum b/go.sum
index 1c0c76ec97..9f88f10806 100644
--- a/go.sum
+++ b/go.sum
@@ -266,8 +266,8 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
-google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
-google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
+google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
From 93d40efde1473c5e07522703ca6ba38639098a3c Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 16 Jan 2025 10:21:20 +0100
Subject: [PATCH 308/836] chore(deps): update dependency
vmware-tanzu/velero-plugin-for-aws to v1.11.1 (main) (#6610)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 96260e749b..7eb762fd35 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1344,7 +1344,7 @@ jobs:
uses: nick-fields/retry@v3
env:
VELERO_VERSION: "v1.15.1"
- VELERO_AWS_PLUGIN_VERSION: "v1.11.0"
+ VELERO_AWS_PLUGIN_VERSION: "v1.11.1"
with:
timeout_minutes: 10
max_attempts: 3
From 404f0acb2087485408339a0a6041ebcc1c99240e Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Tue, 21 Jan 2025 13:35:08 +0100
Subject: [PATCH 309/836] docs: add OpenSSF best practices badge (#6627)
Closes #6626
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Gabriele Bartolini
Co-authored-by: Gabriele Bartolini
---
README.md | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 8a8e8df0d6..e9e3eda545 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
-[](https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg)
+[][cncf-landscape]
[][latest-release]
[][license]
+[][openssf]
[![Documentation][documentation-badge]][documentation]
[][stackoverflow]
@@ -160,8 +161,10 @@ organization to this list!
are trademarks or registered trademarks of the PostgreSQL Community Association
of Canada, and used with their permission.*
+[cncf-landscape]: https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg
[stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg
[latest-release]: https://github.com/cloudnative-pg/cloudnative-pg/releases/latest
[documentation]: https://cloudnative-pg.io/documentation/current/
[license]: https://github.com/cloudnative-pg/cloudnative-pg?tab=Apache-2.0-1-ov-file#readme
+[openssf]: https://www.bestpractices.dev/projects/9933
[documentation-badge]: https://img.shields.io/badge/Documentation-white?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGN0lEQVR4nJRXXWwcVxU%2B8%2F%2BzP%2BPZtR2v7dqy07jUJUALNaiK6lZyUVVKWgGKaIv8QCMekBAVQlQICcEzVZFQVYFKQhASEBHlISJPCRJEshTFChgrIYHEiYMh69jetffHM7Mzc%2B9Bs7vjnTs7yZpZWbt37s%2F5zne%2Bc861CD0eXRkbHc3NfjeffvxNAGEAgULD2756v35%2B3qe1Nc4fnQVEXlA2LnOcXlCF8S%2B6vvVgq%2FL3M65X3e51PvfQCU4WJgZe%2B8GQ8fS7AKgjBB8KEHwjDXZSjkf0CREAaXM2eI9c65siqWxWl360Xl74ANHz%2Fy8AitxnTBfmz%2BhyYS4wGhwObQCIHSA0AigOMBzvOsXzd4pnjyL6NMmWEH8hi2b28Og3%2FqRJA0ewfQy0v1vGO2NovwPo%2FEU%2FwVgSU1PI%2BSu79v3lJAB8HM%2BTI%2FO%2FUUXzM4xHIe0xI4DdRqOAwnF%2F38ePPyzaDIDh%2FMxcWh462m08aojuGY97C0nrAEHg9BlF0fmeAPr0J15vbaKsp0BZQzEDEAlP9B209UIIVXUta%2FQEQHwxgxFjTc%2BRskAwrgVWmHtg22vMPJwLDqGUNJIAMHVAkGu3WdpZz6NAkgSXpINSycluV28er1a3rJ4M3F2%2F9AtCvXKycRrTQttrjINjxxxIL9jevxdaDHU%2FTBr6pL5ruzuLZubgUQBOY2hPij3GBUe7tBCMBRE2KrXVSz0BBI%2FtPVgtV%2F%2FxkZ5WSjI%2F%2BFIXC3sHJwgT4yFqrZFFTSlVrp3sGYLwcfxSmXCbS00j2Ms4K7qkOsFx6qdTuiHtG4AimfmM8NyvOvR2G48qXtZ2fsfrN7%2BqpcRyUp0glKiimDm4TwAcHBp%2B9WeA4ki0GMWNR9OVF8BZvn7xtI%2FF09H8jzLEgz6yLwCDuelnFXHkTZZOytCOEdqDOtGwsm%2BNj00fXt%2B6%2Bj4vcA7bwNrZwENmXwAKuZnvsNRThs5ozMPfPiHyoDF7xiduHcXb70A8dRFheHjiySQATBZk0nl9MHPkBEWUoEtYjyrPFNwGzfdlD37Zdu98KCv%2BMmD2BYpUCvcST39e0%2BS1Wr249FAAg7mPzWrS5NstEbE0xrsiA6QN1PfRFLnhr%2BspxVJTlY8Mw1DqNXeyCQFREEXz9cHB0QOev73QaNhOF4B%2B45PHFHFgDhJTqjuubJFqX1KQco7NTTuW8kq95k2G4eLEGzM7lfItnjNeTKcOfV%2FT8hOuV77A9IK0XjgMpCO0ZiuV3L%2F6njCFAOmucGB3OII5XgCXEJTDdZLElVbu3Vz0fWexvL30k0B6ggBACOmIUBAEUKX0dDTvW7RCYcdZPq6n%2FSsQnUO2RuyBRgQ9Rc5mMvJ6CNIj1nXfd9qWAsCkaZzJAk1L8UjVqY737dSjfCGrPHWqXL32Q0mB%2F2BXnke00WaEYv2aTzAbnuV5pcWkDGAAGJmhSafh6hjr%2BW2SVYHrP7bb%2BOdPW%2FUgflGlTM2gaK%2Ft7tp6%2BN6yixdN89DcIwGktIFPABfNbwoQqQWEUnDJzg1g0jDeK5p7Kp7nensXFI7uyAr%2FLyM7fYLnpa6LYScE8vDnot5hrKlslm%2BfE3nVxJgO4o3KcYu%2FF8XM8yFQ27n%2F65Te%2FzKl3Jhpjj6TCIDneRD5%2FItxr1vdkALw7p1qfeWPpjHxMtsXaPxu6FLc%2BrnbSB1r7fcrlr36nqwMzQfnplJDryQCGOh%2FbLjhcM%2FEvQ4Pdund9xRV5m1LfTXaF%2BK9gsLGB9nsgddcz8thM%2FarPzYM8%2FFazf9sMFaU%2Fi%2FwvNANwEhPvUGR8ozn7d%2BiDKXixtKpbHp81nV9E7puRy31ixKUbOe%2Fv3Ud891ghhDrL5Z975eaOvV%2BCNRp0Gfz%2BcJjDABdTwlpdfKbId0t5XYAcHz5D5ZVtWUp9%2Flog2L7PgVJqZx0HOE5Cqghemv1%2Bt%2FeGBmZ%2BdB2yNN72UEpnzXG32YADA186i3bIpPxMhuKrFK%2Fd77JUnbkKbYvRJlC8DzKSZK76Lq1he2dKy%2BZuSfesSz5a2xHDbLJ%2BJaqdv5H4EUY%2BzbG2m9HgN7mg81bfw4W1uu7AjvHaqDhqF%2FZ3Fq5XFy%2FcESSDsx5fvZ7wLEsNfXk%2BjlVHfpSCOB%2FAQAA%2F%2F8zd8orZc2N9AAAAABJRU5ErkJggg%3D%3D
From 721233c1fb976ca3d6b12c20409eb1fe9f3a6848 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Tue, 21 Jan 2025 13:40:02 +0100
Subject: [PATCH 310/836] docs: clarify readiness probe for replicas (#6629)
Closes #6628
Signed-off-by: Gabriele Bartolini
Signed-off-by: Francesco Canovai
Co-authored-by: Jaime Silvela
Co-authored-by: Francesco Canovai
---
docs/src/instance_manager.md | 43 ++++++++++++++++++++++++++----------
docs/src/replication.md | 10 +++++++--
2 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index 53d13c4e4d..c1335e67cc 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -15,17 +15,27 @@ main container, which in turn runs the PostgreSQL instance. During the lifetime
of the Pod, the instance manager acts as a backend to handle the
[startup, liveness and readiness probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes).
-## Startup, liveness and readiness probes
+## Startup, Liveness, and Readiness Probes
-The startup and liveness probes rely on `pg_isready`, while the readiness
-probe checks if the database is up and able to accept connections.
+CloudNativePG leverages [PostgreSQL's `pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html)
+to implement Kubernetes startup, liveness, and readiness probes.
### Startup Probe
-The `.spec.startDelay` parameter specifies the delay (in seconds) before the
-liveness probe activates after a PostgreSQL Pod starts. By default, this is set
-to `3600` seconds. You should adjust this value based on the time PostgreSQL
-requires to fully initialize in your environment.
+The startup probe ensures that a PostgreSQL instance, whether a primary or
+standby, has fully started according to `pg_isready`.
+While the startup probe is running, the liveness and readiness probes remain
+disabled. Following Kubernetes standards, if the startup probe fails, the
+kubelet will terminate the container, which will then be restarted.
+
+The startup probe provided by CloudNativePG is configurable via the
+parameter `.spec.startDelay`, which specifies the maximum time, in seconds,
+allowed for the startup probe to succeed. At a minimum, the probe requires
+`pg_isready` to return `0` or `1`.
+
+By default, the `startDelay` is set to `3600` seconds. It is recommended to
+adjust this setting based on the time PostgreSQL needs to fully initialize in
+your specific environment.
!!! Warning
Setting `.spec.startDelay` too low can cause the liveness probe to activate
@@ -71,9 +81,14 @@ spec:
### Liveness Probe
-The liveness probe begins after the startup probe succeeds and is responsible
-for detecting if the PostgreSQL instance has entered a broken state that
-requires a restart of the pod.
+The liveness probe begins after the startup probe successfully completes. Its
+primary role is to ensure the PostgreSQL instance—whether primary or standby—is
+operating correctly. This is achieved using the `pg_isready` utility. Both exit
+codes `0` (indicating the server is accepting connections) and `1` (indicating
+the server is rejecting connections, such as during startup or a smart
+shutdown) are treated as valid outcomes.
+Following Kubernetes standards, if the liveness probe fails, the
+kubelet will terminate the container, which will then be restarted.
The amount of time before a Pod is classified as not alive is configurable via
the `.spec.livenessProbeTimeout` parameter.
@@ -123,8 +138,12 @@ spec:
### Readiness Probe
-The readiness probe determines when a pod running a PostgreSQL instance is
-prepared to accept traffic and serve requests.
+The readiness probe begins once the startup probe has successfully completed.
+Its purpose is to check whether the PostgreSQL instance is ready to accept
+traffic and serve requests.
+For streaming replicas, it also requires that they have connected to the source
+at least once. Following Kubernetes standards, if the readiness probe fails,
+the pod will be marked unready and will not receive traffic from any services.
CloudNativePG uses the following default configuration for the readiness probe:
diff --git a/docs/src/replication.md b/docs/src/replication.md
index fbc37595bb..4c10899d1d 100644
--- a/docs/src/replication.md
+++ b/docs/src/replication.md
@@ -375,7 +375,7 @@ spec:
```
ANY 1 ("foo-2","foo-3","foo-1")
```
-
+
At this point no write operations will be allowed until at least one of the
standbys is available again.
@@ -390,6 +390,12 @@ attempt to replicate WAL records to the designated number of synchronous
standbys, but write operations will continue even if fewer than the requested
number of standbys are available.
+!!! Important
+ Make sure you have a clear understanding of what *ready/available* means
+ for a replica and set your expectations accordingly. By default, a replica is
+ considered ready when it has successfully connected to the source at least
+ once.
+
This setting balances data safety with availability, enabling applications to
continue writing during temporary standby unavailability—hence, it’s also known
as *self-healing mode*.
@@ -485,7 +491,7 @@ ANY q (pod1, pod2, ...)
Where:
-- `q` is an integer automatically calculated by the operator to be:
+- `q` is an integer automatically calculated by the operator to be:
`1 <= minSyncReplicas <= q <= maxSyncReplicas <= readyReplicas`
- `pod1, pod2, ...` is the list of all PostgreSQL pods in the cluster
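To make the probe semantics above concrete, here is a speculative sketch of the documented liveness check: `pg_isready` exit codes `0` and `1` both count as alive, while `2`, `3`, or an execution failure do not. The host, port, and function name are illustrative, not the operator's actual implementation:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// instanceIsAlive mimics the documented semantics: exit code 0 (accepting
// connections) and exit code 1 (rejecting connections, e.g. during startup
// or a smart shutdown) both count as alive; 2, 3, or an exec failure do not.
func instanceIsAlive() (bool, error) {
	err := exec.Command("pg_isready", "-h", "localhost", "-p", "5432").Run()
	if err == nil {
		return true, nil
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
		return true, nil
	}
	return false, err
}

func main() {
	alive, err := instanceIsAlive()
	fmt.Println("alive:", alive, "err:", err)
}
```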
From 0a4729b17610da8b12a5c4a15a6b79e2b376fd52 Mon Sep 17 00:00:00 2001
From: Jonathan Battiato
Date: Fri, 24 Jan 2025 16:01:31 +0100
Subject: [PATCH 311/836] chore(test): add preflight check for operator in CD
workflow (#5690)
This patch adds the preflight check for the operator to the `continuous-delivery.yml` workflow.
The operator check is required for the operator to run properly in an OLM environment.
Closes #5642
Signed-off-by: Jonathan Battiato
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: Jonathan Gonzalez V.
---
.github/workflows/continuous-delivery.yml | 39 +++++++++++++++++++++++
Makefile | 3 ++
2 files changed, 42 insertions(+)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 7eb762fd35..f86f5172c1 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -258,6 +258,7 @@ jobs:
author_email: ${{ steps.build-meta.outputs.author_email }}
controller_img: ${{ env.CONTROLLER_IMG }}
controller_img_ubi8: ${{ env.CONTROLLER_IMG_UBI8 }}
+ index_img: ${{ env.INDEX_IMG }}
bundle_img: ${{ env.BUNDLE_IMG }}
catalog_img: ${{ env.CATALOG_IMG }}
steps:
@@ -433,6 +434,7 @@ jobs:
echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV
echo "CONTROLLER_IMG_UBI8=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV
echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV
+ echo "INDEX_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:index-${TAG}" >> $GITHUB_ENV
echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV
-
name: Generate manifest for operator deployment
@@ -1970,6 +1972,7 @@ jobs:
env:
CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }}
BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }}
+ INDEX_IMG: ${{ needs.buildx.outputs.index_img }}
CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }}
run: |
make olm-catalog
@@ -1985,6 +1988,42 @@ jobs:
run: |
envsubst < hack/install-config.yaml.template > hack/install-config.yaml
openshift-install create cluster --dir hack/ --log-level warn
+ -
+ name: Install operator-sdk
+ run: |
+ make operator-sdk
+ -
+ name: Install preflight
+ run: |
+ make preflight
+ -
+ name: Create Secret
+ run: |
+ export KUBECONFIG=$(pwd)/hack/auth/kubeconfig
+ oc create ns cloudnative-pg
+ oc -n cloudnative-pg create secret generic cnpg-pull-secret \
+ --from-file=.dockerconfigjson=$HOME/.docker/config.json \
+ --type=kubernetes.io/dockerconfigjson
+ -
+ name: Run preflight operator test
+ env:
+ BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }}
+ PFLT_INDEXIMAGE: ${{ needs.buildx.outputs.index_img }}
+ PFLT_SCORECARD_WAIT_TIME: "1200"
+ PFLT_ARTIFACTS: "preflight_operator_results"
+ run: |
+ PATH=$(pwd)/bin/:${PATH} \
+ KUBECONFIG=$(pwd)/hack/auth/kubeconfig \
+ bin/preflight check operator ${BUNDLE_IMG} \
+ --docker-config $HOME/.docker/config.json --loglevel trace
+ -
+ name: Check preflight operator results
+ run: |
+ PASS=`jq -r .passed preflight_operator_results/results.json`
+ if [[ "$PASS" == "false" ]]
+ then
+ exit 1
+ fi
-
name: Run E2E tests
if: (always() && !cancelled())
diff --git a/Makefile b/Makefile
index c71f2e7f95..de3d0d7ab2 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,7 @@ endif
endif
CATALOG_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:catalog-/')
BUNDLE_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:bundle-/')
+INDEX_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:index-/')
COMMIT := $(shell git rev-parse --short HEAD || echo unknown)
DATE := $(shell git log -1 --pretty=format:'%ad' --date short)
@@ -170,6 +171,8 @@ olm-catalog: olm-bundle opm ## Build and push the index image for OLM Catalog
- Image: ${BUNDLE_IMG}" | envsubst > cloudnative-pg-operator-template.yaml
$(OPM) alpha render-template semver -o yaml < cloudnative-pg-operator-template.yaml > catalog/catalog.yaml ;\
$(OPM) validate catalog/ ;\
+ $(OPM) index add --mode semver --container-tool docker --bundles "${BUNDLE_IMG}" --tag "${INDEX_IMG}" ;\
+ docker push ${INDEX_IMG} ;\
DOCKER_BUILDKIT=1 docker build --push -f catalog.Dockerfile -t ${CATALOG_IMG} . ;\
echo -e "apiVersion: operators.coreos.com/v1alpha1\n\
kind: CatalogSource\n\
From 53b2647da9e44c1c44d48a38e5cd1f1e0a289abe Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Fri, 24 Jan 2025 17:50:31 +0100
Subject: [PATCH 312/836] fix: consistent threshold calculation for probes
(#6656)
In version 1.25.0, we introduced inconsistent behavior in determining
the default values of the standard probe knobs when a stanza under
`.spec.probes` is defined (#6266).
This patch rectifies that behavior by allowing users to override any of
the settings, including `failureThreshold`. When `failureThreshold` is
not specified in the startup probe, its value is calculated by dividing
`.spec.startDelay` by `periodSeconds` (which defaults to 10 and is now
overridable). The same principle applies to the liveness probe with the
`.spec.livenessProbeTimeout` option.
Closes: #6655
Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Signed-off-by: Gabriele Bartolini
Co-authored-by: Armando Ruocco
Co-authored-by: Gabriele Bartolini
---
api/v1/cluster_funcs.go | 24 +++++++++++++-----
api/v1/cluster_funcs_test.go | 10 +++++++-
docs/src/instance_manager.md | 48 +++++++++++++++++++-----------------
pkg/specs/pods.go | 47 +++++++++++++++--------------------
pkg/specs/pods_test.go | 16 +++---------
5 files changed, 77 insertions(+), 68 deletions(-)
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index 132656c4db..6fa6eba800 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -1457,10 +1457,22 @@ func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) {
return
}
- k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds
- k8sProbe.TimeoutSeconds = p.TimeoutSeconds
- k8sProbe.PeriodSeconds = p.PeriodSeconds
- k8sProbe.SuccessThreshold = p.SuccessThreshold
- k8sProbe.FailureThreshold = p.FailureThreshold
- k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds
+ if p.InitialDelaySeconds != 0 {
+ k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds
+ }
+ if p.TimeoutSeconds != 0 {
+ k8sProbe.TimeoutSeconds = p.TimeoutSeconds
+ }
+ if p.PeriodSeconds != 0 {
+ k8sProbe.PeriodSeconds = p.PeriodSeconds
+ }
+ if p.SuccessThreshold != 0 {
+ k8sProbe.SuccessThreshold = p.SuccessThreshold
+ }
+ if p.FailureThreshold != 0 {
+ k8sProbe.FailureThreshold = p.FailureThreshold
+ }
+ if p.TerminationGracePeriodSeconds != nil {
+ k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds
+ }
}
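The net effect of the guard clauses added to `ApplyInto` is that zero-valued fields in a user-supplied probe stanza no longer clobber the operator's defaults. A minimal sketch using the types from this patch (the field values are hypothetical):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func main() {
	// Default probe as the operator builds it; FailureThreshold is left at
	// zero so it can be derived from startDelay / periodSeconds afterwards.
	probe := &corev1.Probe{PeriodSeconds: 10, TimeoutSeconds: 5}

	// User stanza that overrides only the period: with the new guard
	// clauses, the zero-valued fields no longer clobber the defaults.
	custom := &apiv1.Probe{PeriodSeconds: 30}
	custom.ApplyInto(probe)

	fmt.Println(probe.PeriodSeconds)  // 30 (overridden)
	fmt.Println(probe.TimeoutSeconds) // 5 (preserved: zero in the stanza)
}
```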
diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go
index d0126362f5..fd2d93e5c1 100644
--- a/api/v1/cluster_funcs_test.go
+++ b/api/v1/cluster_funcs_test.go
@@ -1723,6 +1723,14 @@ var _ = Describe("Probes configuration", func() {
Expect(configuredProbe.PeriodSeconds).To(Equal(config.PeriodSeconds))
Expect(configuredProbe.SuccessThreshold).To(Equal(config.SuccessThreshold))
Expect(configuredProbe.FailureThreshold).To(Equal(config.FailureThreshold))
- Expect(configuredProbe.TerminationGracePeriodSeconds).To(BeNil())
+ Expect(*configuredProbe.TerminationGracePeriodSeconds).To(BeEquivalentTo(23))
+ })
+
+ It("should not overwrite any field", func() {
+ config := &Probe{}
+ configuredProbe := originalProbe.DeepCopy()
+ config.ApplyInto(configuredProbe)
+ Expect(originalProbe).To(BeEquivalentTo(*configuredProbe),
+ "configured probe should not be modified with zero values")
})
})
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index c1335e67cc..df01927359 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -51,23 +51,25 @@ successThreshold: 1
timeoutSeconds: 5
```
-Here, `FAILURE_THRESHOLD` is calculated as `startDelay` divided by
-`periodSeconds`.
+The `failureThreshold` value is automatically calculated by dividing
+`startDelay` by `periodSeconds`.
-If the default behavior based on `startDelay` is not suitable for your use
-case, you can take full control of the startup probe by specifying custom
-parameters in the `.spec.probes.startup` stanza. Note that defining this stanza
-will override the default behavior, including the use of `startDelay`.
+You can customize any of the probe settings in the `.spec.probes.startup`
+section of your configuration.
!!! Warning
- Ensure that any custom probe settings are aligned with your cluster’s
- operational requirements to prevent unintended disruptions.
+ Be sure that any custom probe settings are tailored to your cluster's
+ operational requirements to avoid unintended disruptions.
!!! Info
- For detailed information about probe configuration, refer to the
- [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
+ For more details on probe configuration, refer to the
+ [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
+
+If you manually specify `.spec.probes.startup.failureThreshold`, it will
+override the default behavior and disable the automatic use of `startDelay`.
-For example, the following configuration bypasses `startDelay` entirely:
+For example, the following configuration explicitly sets custom probe
+parameters, bypassing `startDelay`:
```yaml
# ... snip
@@ -103,28 +105,30 @@ successThreshold: 1
timeoutSeconds: 5
```
-Here, `FAILURE_THRESHOLD` is calculated as `livenessProbeTimeout` divided by
-`periodSeconds`.
+The `failureThreshold` value is automatically calculated by dividing
+`livenessProbeTimeout` by `periodSeconds`.
By default, `.spec.livenessProbeTimeout` is set to `30` seconds. This means the
liveness probe will report a failure if it detects three consecutive probe
failures, with a 10-second interval between each check.
-If the default behavior using `livenessProbeTimeout` does not meet your needs,
-you can fully customize the liveness probe by defining parameters in the
-`.spec.probes.liveness` stanza. Keep in mind that specifying this stanza will
-override the default behavior, including the use of `livenessProbeTimeout`.
+You can customize any of the probe settings in the `.spec.probes.liveness`
+section of your configuration.
!!! Warning
- Ensure that any custom probe settings are aligned with your cluster’s
- operational requirements to prevent unintended disruptions.
+ Be sure that any custom probe settings are tailored to your cluster's
+ operational requirements to avoid unintended disruptions.
!!! Info
For more details on probe configuration, refer to the
- [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
+ [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe).
+
+If you manually specify `.spec.probes.liveness.failureThreshold`, it will
+override the default behavior and disable the automatic use of
+`livenessProbeTimeout`.
-For example, the following configuration overrides the default behavior and
-bypasses `livenessProbeTimeout`:
+For example, the following configuration explicitly sets custom probe
+parameters, bypassing `livenessProbeTimeout`:
```yaml
# ... snip
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index 3fe970313b..b20b704f32 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -201,9 +201,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
// This is the default startup probe, and can be overridden
// the user configuration in cluster.spec.probes.startup
StartupProbe: &corev1.Probe{
- FailureThreshold: getStartupProbeFailureThreshold(cluster.GetMaxStartDelay()),
- PeriodSeconds: StartupProbePeriod,
- TimeoutSeconds: 5,
+ PeriodSeconds: StartupProbePeriod,
+ TimeoutSeconds: 5,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: url.PathHealth,
@@ -275,22 +274,25 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
addManagerLoggingOptions(cluster, &containers[0])
- // if user customizes the liveness probe timeout, we need to adjust the failure threshold
- addLivenessProbeFailureThreshold(cluster, &containers[0])
-
// use the custom probe configuration if provided
ensureCustomProbesConfiguration(&cluster, &containers[0])
- return containers
-}
+ // ensure a proper threshold is set
+ if containers[0].StartupProbe.FailureThreshold == 0 {
+ containers[0].StartupProbe.FailureThreshold = getFailureThreshold(
+ cluster.GetMaxStartDelay(),
+ containers[0].StartupProbe.PeriodSeconds,
+ )
+ }
-// addLivenessProbeFailureThreshold adjusts the liveness probe failure threshold
-// based on the `spec.livenessProbeTimeout` value
-func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.Container) {
- if cluster.Spec.LivenessProbeTimeout != nil {
- timeout := *cluster.Spec.LivenessProbeTimeout
- container.LivenessProbe.FailureThreshold = getLivenessProbeFailureThreshold(timeout)
+ if cluster.Spec.LivenessProbeTimeout != nil && containers[0].LivenessProbe.FailureThreshold == 0 {
+ containers[0].LivenessProbe.FailureThreshold = getFailureThreshold(
+ *cluster.Spec.LivenessProbeTimeout,
+ containers[0].LivenessProbe.PeriodSeconds,
+ )
}
+
+ return containers
}
// ensureCustomProbesConfiguration applies the custom probe configuration
@@ -308,22 +310,13 @@ func ensureCustomProbesConfiguration(cluster *apiv1.Cluster, container *corev1.C
cluster.Spec.Probes.Startup.ApplyInto(container.StartupProbe)
}
-// getStartupProbeFailureThreshold get the startup probe failure threshold
+// getFailureThreshold get the startup probe failure threshold
// FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1
-func getStartupProbeFailureThreshold(startupDelay int32) int32 {
- if startupDelay <= StartupProbePeriod {
- return 1
- }
- return int32(math.Ceil(float64(startupDelay) / float64(StartupProbePeriod)))
-}
-
-// getLivenessProbeFailureThreshold get the liveness probe failure threshold
-// FAILURE_THRESHOLD = ceil(livenessTimeout / periodSeconds) and minimum value is 1
-func getLivenessProbeFailureThreshold(livenessTimeout int32) int32 {
- if livenessTimeout <= LivenessProbePeriod {
+func getFailureThreshold(startupDelay, period int32) int32 {
+ if startupDelay <= period {
return 1
}
- return int32(math.Ceil(float64(livenessTimeout) / float64(LivenessProbePeriod)))
+ return int32(math.Ceil(float64(startupDelay) / float64(period)))
}
// CreateAffinitySection creates the affinity sections for Pods, given the configuration
diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go
index 17f9494d15..ff4a9c48f8 100644
--- a/pkg/specs/pods_test.go
+++ b/pkg/specs/pods_test.go
@@ -917,20 +917,12 @@ var _ = Describe("PodSpec drift detection", func() {
var _ = Describe("Compute startup probe failure threshold", func() {
It("should take the minimum value 1", func() {
- Expect(getStartupProbeFailureThreshold(5)).To(BeNumerically("==", 1))
+ Expect(getFailureThreshold(5, StartupProbePeriod)).To(BeNumerically("==", 1))
+ Expect(getFailureThreshold(5, LivenessProbePeriod)).To(BeNumerically("==", 1))
})
It("should take the value from 'startDelay / periodSeconds'", func() {
- Expect(getStartupProbeFailureThreshold(109)).To(BeNumerically("==", 11))
- })
-})
-
-var _ = Describe("Compute liveness probe failure threshold", func() {
- It("should take the minimum value 1", func() {
- Expect(getLivenessProbeFailureThreshold(5)).To(BeNumerically("==", 1))
- })
-
- It("should take the value from 'startDelay / periodSeconds'", func() {
- Expect(getLivenessProbeFailureThreshold(31)).To(BeNumerically("==", 4))
+ Expect(getFailureThreshold(109, StartupProbePeriod)).To(BeNumerically("==", 11))
+ Expect(getFailureThreshold(31, LivenessProbePeriod)).To(BeNumerically("==", 4))
})
})
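A small worked example of the unified threshold rule above — `failureThreshold = ceil(delay / periodSeconds)`, floored at 1 — mirroring `getFailureThreshold` from this patch:

```go
package main

import (
	"fmt"
	"math"
)

// failureThreshold mirrors getFailureThreshold from this patch:
// ceil(delay / period), with a minimum value of 1.
func failureThreshold(delaySeconds, periodSeconds int32) int32 {
	if delaySeconds <= periodSeconds {
		return 1
	}
	return int32(math.Ceil(float64(delaySeconds) / float64(periodSeconds)))
}

func main() {
	fmt.Println(failureThreshold(3600, 10)) // default startDelay: 360 attempts
	fmt.Println(failureThreshold(30, 10))   // default livenessProbeTimeout: 3
	fmt.Println(failureThreshold(5, 10))    // shorter than one period: floor of 1
}
```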
From 1fb7202cfeb089e0572e22e83b289c089d8dc758 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Sat, 25 Jan 2025 23:33:28 +0100
Subject: [PATCH 313/836] fix(ci): make sure to run only on created comments
(#6666)
The action we use also triggers when users delete a comment, which results
in an error. Until the GitHub action fixes the issue, we now make sure
the event comes with the `created` action.
Signed-off-by: Jonathan Gonzalez V.
---
.github/workflows/continuous-delivery.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index f86f5172c1..3f8299f62b 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -81,6 +81,7 @@ jobs:
check_commenter:
if: |
github.event_name == 'issue_comment' &&
+ github.event.action == 'created' &&
github.event.issue.pull_request &&
startsWith(github.event.comment.body, '/test')
name: Retrieve command
From a06f18096420624b40856910cf8f7326812b1aa2 Mon Sep 17 00:00:00 2001
From: Yurii Vlasov
Date: Mon, 27 Jan 2025 11:08:00 +0200
Subject: [PATCH 314/836] fix(import): skip role import if no roles are
specified (#6646)
Closes #6639
Signed-off-by: Yurii Vlasov
---
pkg/management/postgres/logicalimport/monolith.go | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/pkg/management/postgres/logicalimport/monolith.go b/pkg/management/postgres/logicalimport/monolith.go
index c63d787e91..5d95d7561c 100644
--- a/pkg/management/postgres/logicalimport/monolith.go
+++ b/pkg/management/postgres/logicalimport/monolith.go
@@ -35,12 +35,13 @@ func Monolith(
contextLogger := log.FromContext(ctx)
contextLogger.Info("starting monolith clone process")
- if err := cloneRoles(ctx, cluster, destination, origin); err != nil {
- return err
- }
-
- if err := cloneRoleInheritance(ctx, destination, origin); err != nil {
- return err
+ if len(cluster.Spec.Bootstrap.InitDB.Import.Roles) > 0 {
+ if err := cloneRoles(ctx, cluster, destination, origin); err != nil {
+ return err
+ }
+ if err := cloneRoleInheritance(ctx, destination, origin); err != nil {
+ return err
+ }
}
ds := databaseSnapshotter{cluster: cluster}
From 7fe4331db8d526eca386c1fdbd443f806634cc4e Mon Sep 17 00:00:00 2001
From: Aelxander
Date: Mon, 27 Jan 2025 14:38:02 +0100
Subject: [PATCH 315/836] docs(fix): correct `dataDurability` setting in
preferred section example (#6635)
Update the documentation to use the correct `dataDurability` setting
in the preferred section example.
Signed-off-by: Aelxander
---
docs/src/replication.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/src/replication.md b/docs/src/replication.md
index 4c10899d1d..fbe01b2d58 100644
--- a/docs/src/replication.md
+++ b/docs/src/replication.md
@@ -422,7 +422,7 @@ spec:
synchronous:
method: any
number: 2
- dataDurability: required
+ dataDurability: preferred
```
1. Initial state. The content of `synchronous_standby_names` is:
From 617de79db9642dfc1961ef02f591b951e0df8a85 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Tue, 28 Jan 2025 10:34:37 +0100
Subject: [PATCH 316/836] docs: describe `tcp_syn_retries` behavior (#6673)
Add documentation for the replication connection behavior in relation
to the `tcp_syn_retries` setting.
Signed-off-by: Francesco Canovai
Signed-off-by: Marco Nenciarini
Signed-off-by: Gabriele Bartolini
Co-authored-by: Marco Nenciarini
Co-authored-by: Gabriele Bartolini
---
.wordlist-en-custom.txt | 1 +
docs/src/troubleshooting.md | 18 ++++++++++++++++++
2 files changed, 19 insertions(+)
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index bb773940fd..b80faf7d54 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1131,6 +1131,7 @@ readthedocs
readyInstances
reconciler
reconciliationLoop
+reconnection
recoverability
recoveredCluster
recoveryTarget
diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md
index aa67c8be17..6003f2ac96 100644
--- a/docs/src/troubleshooting.md
+++ b/docs/src/troubleshooting.md
@@ -794,3 +794,21 @@ API. Please check your networking.
Another possible cause is when you have sidecar injection configured. Sidecars
such as Istio may make the network temporarily unavailable during startup. If
you have sidecar injection enabled, retry with injection disabled.
+
+### Replicas take over two minutes to reconnect after a failover
+
+When the primary instance fails, the operator promotes the most advanced
+standby to the primary role. Other standby instances then attempt to reconnect
+to the `-rw` service for replication. However, during this reconnection
+process, `kube-proxy` may not yet have updated its routing information.
+As a result, the initial `SYN` packet sent by the standby instances can fail
+to reach the intended destination.
+
+On Linux systems, the default value for the `tcp_syn_retries` kernel parameter
+is set to 6. This configuration means the system will retry a failed connection
+for approximately 127 seconds before giving up. This extended retry period can
+significantly delay the reconnection process. For more details, consult the
+[tcp_syn_retries documentation](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt).
+
+Altering this behavior will require changing the `tcp_syn_retries`
+parameter on the host node.
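The ~127-second figure follows from TCP's exponential backoff: assuming the typical 1-second initial retransmission timeout, `tcp_syn_retries = 6` means the kernel waits 1 + 2 + 4 + 8 + 16 + 32 + 64 seconds before giving up. A back-of-the-envelope sketch:

```go
package main

import "fmt"

func main() {
	// Initial retransmission timeout of 1s, doubled after each unanswered
	// SYN; the kernel gives up after tcp_syn_retries retransmissions.
	const tcpSynRetries = 6
	rto, total := 1, 0
	for attempt := 0; attempt <= tcpSynRetries; attempt++ {
		total += rto
		rto *= 2
	}
	fmt.Println(total, "seconds") // 1+2+4+8+16+32+64 = 127
}
```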
From cf64c5826af7b3335cdf25b162c8ecfb249efba6 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Wed, 29 Jan 2025 12:15:18 +0100
Subject: [PATCH 317/836] test(e2e): enable race detector in the E2e tests
(#6634)
This patch enables Go's built-in race detector when running
the E2E tests.
Closes #6633
Signed-off-by: Jonathan Gonzalez V.
---
.github/workflows/continuous-integration.yml | 2 +-
.goreleaser.yml | 19 +++++++++++++++++++
Makefile | 9 +++++++++
3 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index d5c39b2e61..bdd2c33d3c 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -30,7 +30,7 @@ env:
BUILD_PUSH_CACHE_FROM: ""
BUILD_PUSH_CACHE_TO: ""
BUILD_PLUGIN_RELEASE_ARGS: "build --skip=validate --clean --id kubectl-cnpg --timeout 60m"
- BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager"
+ BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager-race"
REPOSITORY_OWNER: "cloudnative-pg"
REGISTRY: "ghcr.io"
REGISTRY_USER: ${{ github.actor }}
diff --git a/.goreleaser.yml b/.goreleaser.yml
index bfa792b814..55a38dcd61 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -33,6 +33,25 @@ builds:
- amd64
- arm64
+- id: manager-race
+ binary: manager/manager_{{ .Arch }}
+ main: cmd/manager/main.go
+ no_unique_dist_dir: true
+ gcflags:
+ - all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}}
+ ldflags:
+ - -race
+ - -s
+ - -w
+ - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion={{.Env.VERSION}}
+ - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit={{.Env.COMMIT}}
+ - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate={{.Env.DATE}}
+ goos:
+ - linux
+ goarch:
+ - amd64
+ - arm64
+
- id: kubectl-cnpg
binary: kubectl-cnpg
main: cmd/kubectl-cnpg/main.go
diff --git a/Makefile b/Makefile
index de3d0d7ab2..607ea89f00 100644
--- a/Makefile
+++ b/Makefile
@@ -128,6 +128,15 @@ build-manager: generate fmt vet ## Build manager binary.
build-plugin: generate fmt vet ## Build plugin binary.
go build -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg
+build-race: generate fmt vet build-manager-race build-plugin-race ## Build the binaries adding the -race option.
+
+build-manager-race: generate fmt vet ## Build manager binary with -race option.
+ go build -race -o bin/manager -ldflags ${LDFLAGS} ./cmd/manager
+
+build-plugin-race: generate fmt vet ## Build plugin binary with -race option.
+ go build -race -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg
+
+
run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config.
go run ./cmd/manager
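As a usage sketch, the new targets added in the Makefile hunk above can be
exercised locally:

```sh
# Build both binaries with the race detector enabled
make build-race
# Or build them individually
make build-manager-race
make build-plugin-race
```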
From 762c282c47493abb521f2e38ba21061090ca7303 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Wed, 29 Jan 2025 15:12:16 +0100
Subject: [PATCH 318/836] fix: add support for PgBouncer 1.24 (#6630)
PgBouncer 1.24 introduces new metrics as per the release notes at https://github.com/pgbouncer/pgbouncer/releases/tag/pgbouncer_1_24_0
This commit adds support for the following metrics:
- `total_bind_count`
- `total_client_parse_count`
- `total_server_parse_count`
- `avg_bind_count` (corrected typo)
- `avg_client_parse_count`
- `avg_server_parse_count`
Closes #6566
Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
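As a quick way to inspect the raw counters behind these metrics (a sketch:
the host, port, and the user allowed to run admin commands all depend on your
Pooler configuration), query the PgBouncer admin console directly:

```sh
# <pooler-rw-service> is a placeholder for your Pooler's service name
psql "host=<pooler-rw-service> port=5432 user=pgbouncer dbname=pgbouncer" \
  -c 'SHOW STATS;'
```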
---
docs/src/connection_pooling.md | 90 +++++-----
.../pgbouncer/metricsserver/pools.go | 43 ++++-
.../pgbouncer/metricsserver/stats.go | 159 +++++++++++++++---
pkg/specs/pgbouncer/deployments.go | 2 +-
4 files changed, 222 insertions(+), 72 deletions(-)
diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md
index 0ac9d50bfe..3073f3b3f9 100644
--- a/docs/src/connection_pooling.md
+++ b/docs/src/connection_pooling.md
@@ -421,165 +421,165 @@ This example shows the output for `cnpg_pgbouncer` metrics:
```text
# HELP cnpg_pgbouncer_collection_duration_seconds Collection time duration in seconds
# TYPE cnpg_pgbouncer_collection_duration_seconds gauge
-cnpg_pgbouncer_collection_duration_seconds{collector="Collect.up"} 0.002443168
-
+cnpg_pgbouncer_collection_duration_seconds{collector="Collect.up"} 0.002338805
+# HELP cnpg_pgbouncer_collection_errors_total Total errors occurred accessing PostgreSQL for metrics.
+# TYPE cnpg_pgbouncer_collection_errors_total counter
+cnpg_pgbouncer_collection_errors_total{collector="sql: Scan error on column index 16, name \"load_balance_hosts\": converting NULL to int is unsupported"} 5
# HELP cnpg_pgbouncer_collections_total Total number of times PostgreSQL was accessed for metrics.
# TYPE cnpg_pgbouncer_collections_total counter
-cnpg_pgbouncer_collections_total 1
-
+cnpg_pgbouncer_collections_total 5
# HELP cnpg_pgbouncer_last_collection_error 1 if the last collection ended with error, 0 otherwise.
# TYPE cnpg_pgbouncer_last_collection_error gauge
cnpg_pgbouncer_last_collection_error 0
-
# HELP cnpg_pgbouncer_lists_databases Count of databases.
# TYPE cnpg_pgbouncer_lists_databases gauge
cnpg_pgbouncer_lists_databases 1
-
# HELP cnpg_pgbouncer_lists_dns_names Count of DNS names in the cache.
# TYPE cnpg_pgbouncer_lists_dns_names gauge
cnpg_pgbouncer_lists_dns_names 0
-
# HELP cnpg_pgbouncer_lists_dns_pending Not used.
# TYPE cnpg_pgbouncer_lists_dns_pending gauge
cnpg_pgbouncer_lists_dns_pending 0
-
# HELP cnpg_pgbouncer_lists_dns_queries Count of in-flight DNS queries.
# TYPE cnpg_pgbouncer_lists_dns_queries gauge
cnpg_pgbouncer_lists_dns_queries 0
-
# HELP cnpg_pgbouncer_lists_dns_zones Count of DNS zones in the cache.
# TYPE cnpg_pgbouncer_lists_dns_zones gauge
cnpg_pgbouncer_lists_dns_zones 0
-
# HELP cnpg_pgbouncer_lists_free_clients Count of free clients.
# TYPE cnpg_pgbouncer_lists_free_clients gauge
cnpg_pgbouncer_lists_free_clients 49
-
# HELP cnpg_pgbouncer_lists_free_servers Count of free servers.
# TYPE cnpg_pgbouncer_lists_free_servers gauge
cnpg_pgbouncer_lists_free_servers 0
-
# HELP cnpg_pgbouncer_lists_login_clients Count of clients in login state.
# TYPE cnpg_pgbouncer_lists_login_clients gauge
cnpg_pgbouncer_lists_login_clients 0
-
# HELP cnpg_pgbouncer_lists_pools Count of pools.
# TYPE cnpg_pgbouncer_lists_pools gauge
cnpg_pgbouncer_lists_pools 1
-
# HELP cnpg_pgbouncer_lists_used_clients Count of used clients.
# TYPE cnpg_pgbouncer_lists_used_clients gauge
cnpg_pgbouncer_lists_used_clients 1
-
# HELP cnpg_pgbouncer_lists_used_servers Count of used servers.
# TYPE cnpg_pgbouncer_lists_used_servers gauge
cnpg_pgbouncer_lists_used_servers 0
-
# HELP cnpg_pgbouncer_lists_users Count of users.
# TYPE cnpg_pgbouncer_lists_users gauge
cnpg_pgbouncer_lists_users 2
-
# HELP cnpg_pgbouncer_pools_cl_active Client connections that are linked to server connection and can process queries.
# TYPE cnpg_pgbouncer_pools_cl_active gauge
cnpg_pgbouncer_pools_cl_active{database="pgbouncer",user="pgbouncer"} 1
-
+# HELP cnpg_pgbouncer_pools_cl_active_cancel_req Client connections that have forwarded query cancellations to the server and are waiting for the server response.
+# TYPE cnpg_pgbouncer_pools_cl_active_cancel_req gauge
+cnpg_pgbouncer_pools_cl_active_cancel_req{database="pgbouncer",user="pgbouncer"} 0
# HELP cnpg_pgbouncer_pools_cl_cancel_req Client connections that have not forwarded query cancellations to the server yet.
# TYPE cnpg_pgbouncer_pools_cl_cancel_req gauge
cnpg_pgbouncer_pools_cl_cancel_req{database="pgbouncer",user="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_pools_cl_waiting Client connections that have sent queries but have not yet got a server connection.
# TYPE cnpg_pgbouncer_pools_cl_waiting gauge
cnpg_pgbouncer_pools_cl_waiting{database="pgbouncer",user="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_pools_cl_waiting_cancel_req Client connections that have not forwarded query cancellations to the server yet.
+# TYPE cnpg_pgbouncer_pools_cl_waiting_cancel_req gauge
+cnpg_pgbouncer_pools_cl_waiting_cancel_req{database="pgbouncer",user="pgbouncer"} 0
+# HELP cnpg_pgbouncer_pools_load_balance_hosts Number of hosts not load balancing between hosts
+# TYPE cnpg_pgbouncer_pools_load_balance_hosts gauge
+cnpg_pgbouncer_pools_load_balance_hosts{database="pgbouncer",user="pgbouncer"} 0
# HELP cnpg_pgbouncer_pools_maxwait How long the first (oldest) client in the queue has waited, in seconds. If this starts increasing, then the current pool of servers does not handle requests quickly enough. The reason may be either an overloaded server or just too small of a pool_size setting.
# TYPE cnpg_pgbouncer_pools_maxwait gauge
cnpg_pgbouncer_pools_maxwait{database="pgbouncer",user="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_pools_maxwait_us Microsecond part of the maximum waiting time.
# TYPE cnpg_pgbouncer_pools_maxwait_us gauge
cnpg_pgbouncer_pools_maxwait_us{database="pgbouncer",user="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_pools_pool_mode The pooling mode in use. 1 for session, 2 for transaction, 3 for statement, -1 if unknown
# TYPE cnpg_pgbouncer_pools_pool_mode gauge
cnpg_pgbouncer_pools_pool_mode{database="pgbouncer",user="pgbouncer"} 3
-
# HELP cnpg_pgbouncer_pools_sv_active Server connections that are linked to a client.
# TYPE cnpg_pgbouncer_pools_sv_active gauge
cnpg_pgbouncer_pools_sv_active{database="pgbouncer",user="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_pools_sv_active_cancel Server connections that are currently forwarding a cancel request
+# TYPE cnpg_pgbouncer_pools_sv_active_cancel gauge
+cnpg_pgbouncer_pools_sv_active_cancel{database="pgbouncer",user="pgbouncer"} 0
# HELP cnpg_pgbouncer_pools_sv_idle Server connections that are unused and immediately usable for client queries.
# TYPE cnpg_pgbouncer_pools_sv_idle gauge
cnpg_pgbouncer_pools_sv_idle{database="pgbouncer",user="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_pools_sv_login Server connections currently in the process of logging in.
# TYPE cnpg_pgbouncer_pools_sv_login gauge
cnpg_pgbouncer_pools_sv_login{database="pgbouncer",user="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_pools_sv_tested Server connections that are currently running either server_reset_query or server_check_query.
# TYPE cnpg_pgbouncer_pools_sv_tested gauge
cnpg_pgbouncer_pools_sv_tested{database="pgbouncer",user="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_pools_sv_used Server connections that have been idle for more than server_check_delay, so they need server_check_query to run on them before they can be used again.
# TYPE cnpg_pgbouncer_pools_sv_used gauge
cnpg_pgbouncer_pools_sv_used{database="pgbouncer",user="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_pools_sv_wait_cancels Servers that normally could become idle, but are waiting to do so until all in-flight cancel requests have completed that were sent to cancel a query on this server.
+# TYPE cnpg_pgbouncer_pools_sv_wait_cancels gauge
+cnpg_pgbouncer_pools_sv_wait_cancels{database="pgbouncer",user="pgbouncer"} 0
+# HELP cnpg_pgbouncer_stats_avg_bind_count Average number of prepared statements readied for execution by clients and forwarded to PostgreSQL by pgbouncer.
+# TYPE cnpg_pgbouncer_stats_avg_bind_count gauge
+cnpg_pgbouncer_stats_avg_bind_count{database="pgbouncer"} 0
+# HELP cnpg_pgbouncer_stats_avg_client_parse_count Average number of prepared statements created by clients.
+# TYPE cnpg_pgbouncer_stats_avg_client_parse_count gauge
+cnpg_pgbouncer_stats_avg_client_parse_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_avg_query_count Average queries per second in last stat period.
# TYPE cnpg_pgbouncer_stats_avg_query_count gauge
-cnpg_pgbouncer_stats_avg_query_count{database="pgbouncer"} 1
-
+cnpg_pgbouncer_stats_avg_query_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_avg_query_time Average query duration, in microseconds.
# TYPE cnpg_pgbouncer_stats_avg_query_time gauge
cnpg_pgbouncer_stats_avg_query_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_avg_recv Average received (from clients) bytes per second.
# TYPE cnpg_pgbouncer_stats_avg_recv gauge
cnpg_pgbouncer_stats_avg_recv{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_avg_sent Average sent (to clients) bytes per second.
# TYPE cnpg_pgbouncer_stats_avg_sent gauge
cnpg_pgbouncer_stats_avg_sent{database="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_stats_avg_server_parse_count Average number of prepared statements created by pgbouncer on a server.
+# TYPE cnpg_pgbouncer_stats_avg_server_parse_count gauge
+cnpg_pgbouncer_stats_avg_server_parse_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_avg_wait_time Time spent by clients waiting for a server, in microseconds (average per second).
# TYPE cnpg_pgbouncer_stats_avg_wait_time gauge
cnpg_pgbouncer_stats_avg_wait_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_avg_xact_count Average transactions per second in last stat period.
# TYPE cnpg_pgbouncer_stats_avg_xact_count gauge
-cnpg_pgbouncer_stats_avg_xact_count{database="pgbouncer"} 1
-
+cnpg_pgbouncer_stats_avg_xact_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_avg_xact_time Average transaction duration, in microseconds.
# TYPE cnpg_pgbouncer_stats_avg_xact_time gauge
cnpg_pgbouncer_stats_avg_xact_time{database="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_stats_total_bind_count Total number of prepared statements readied for execution by clients and forwarded to PostgreSQL by pgbouncer
+# TYPE cnpg_pgbouncer_stats_total_bind_count gauge
+cnpg_pgbouncer_stats_total_bind_count{database="pgbouncer"} 0
+# HELP cnpg_pgbouncer_stats_total_client_parse_count Total number of prepared statements created by clients.
+# TYPE cnpg_pgbouncer_stats_total_client_parse_count gauge
+cnpg_pgbouncer_stats_total_client_parse_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_total_query_count Total number of SQL queries pooled by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_query_count gauge
-cnpg_pgbouncer_stats_total_query_count{database="pgbouncer"} 3
-
+cnpg_pgbouncer_stats_total_query_count{database="pgbouncer"} 15
# HELP cnpg_pgbouncer_stats_total_query_time Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL, executing queries.
# TYPE cnpg_pgbouncer_stats_total_query_time gauge
cnpg_pgbouncer_stats_total_query_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_total_received Total volume in bytes of network traffic received by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_received gauge
cnpg_pgbouncer_stats_total_received{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_total_sent Total volume in bytes of network traffic sent by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_sent gauge
cnpg_pgbouncer_stats_total_sent{database="pgbouncer"} 0
-
+# HELP cnpg_pgbouncer_stats_total_server_parse_count Total number of prepared statements created by pgbouncer on a server.
+# TYPE cnpg_pgbouncer_stats_total_server_parse_count gauge
+cnpg_pgbouncer_stats_total_server_parse_count{database="pgbouncer"} 0
# HELP cnpg_pgbouncer_stats_total_wait_time Time spent by clients waiting for a server, in microseconds.
# TYPE cnpg_pgbouncer_stats_total_wait_time gauge
cnpg_pgbouncer_stats_total_wait_time{database="pgbouncer"} 0
-
# HELP cnpg_pgbouncer_stats_total_xact_count Total number of SQL transactions pooled by pgbouncer.
# TYPE cnpg_pgbouncer_stats_total_xact_count gauge
-cnpg_pgbouncer_stats_total_xact_count{database="pgbouncer"} 3
-
+cnpg_pgbouncer_stats_total_xact_count{database="pgbouncer"} 15
# HELP cnpg_pgbouncer_stats_total_xact_time Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries.
# TYPE cnpg_pgbouncer_stats_total_xact_time gauge
cnpg_pgbouncer_stats_total_xact_time{database="pgbouncer"} 0
```
+!!! Info
+    For a better understanding of these metrics, please refer to the PgBouncer documentation.
+
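+As a quick check, you can scrape these metrics from a pooler pod; a sketch,
+assuming the default metrics port (`9127`) and a pod named `<pooler-pod>`:
+
+```sh
+kubectl port-forward pod/<pooler-pod> 9127:9127 &
+curl -s http://127.0.0.1:9127/metrics | grep cnpg_pgbouncer_stats_total
+```
+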
As for clusters, a specific pooler can be monitored using the
[Prometheus operator's](https://github.com/prometheus-operator/prometheus-operator) resource
[PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor).
diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go
index b6d95b799d..0c7ee4ae41 100644
--- a/pkg/management/pgbouncer/metricsserver/pools.go
+++ b/pkg/management/pgbouncer/metricsserver/pools.go
@@ -39,7 +39,8 @@ type ShowPoolsMetrics struct {
SvLogin,
MaxWait,
MaxWaitUs,
- PoolMode *prometheus.GaugeVec
+ PoolMode,
+ LoadBalanceHosts *prometheus.GaugeVec
}
// Describe produces the description for all the contained Metrics
@@ -180,6 +181,12 @@ func NewShowPoolsMetrics(subsystem string) *ShowPoolsMetrics {
Name: "pool_mode",
Help: "The pooling mode in use. 1 for session, 2 for transaction, 3 for statement, -1 if unknown",
}, []string{"database", "user"}),
+ LoadBalanceHosts: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "load_balance_hosts",
+ Help: "Number of hosts not load balancing between hosts",
+ }, []string{"database", "user"}),
}
}
@@ -233,6 +240,10 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
svActiveCancel int
svBeingCanceled int
)
+ // PGBouncer 1.24.0 or above
+ var (
+ loadBalanceHosts int
+ )
cols, err := rows.Columns()
if err != nil {
@@ -242,7 +253,11 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
return
}
for rows.Next() {
- const poolsColumnsPgBouncer1180 = 16
+ const (
+ poolsColumnsPgBouncer1180 = 16
+ poolsColumnsPgBouncer1240 = 17
+ )
+
switch len(cols) {
case poolsColumnsPgBouncer1180:
if err = rows.Scan(&database, &user,
@@ -265,6 +280,28 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
e.Metrics.Error.Set(1)
e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc()
}
+ case poolsColumnsPgBouncer1240:
+ if err = rows.Scan(&database, &user,
+ &clActive,
+ &clWaiting,
+ &clActiveCancelReq,
+ &clWaitingCancelReq,
+ &svActive,
+ &svActiveCancel,
+ &svBeingCanceled,
+ &svIdle,
+ &svUsed,
+ &svTested,
+ &svLogin,
+ &maxWait,
+ &maxWaitUs,
+ &poolMode,
+ &loadBalanceHosts,
+ ); err != nil {
+ contextLogger.Error(err, "Error while executing SHOW POOLS")
+ e.Metrics.Error.Set(1)
+ e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc()
+ }
default:
if err = rows.Scan(&database, &user,
&clActive,
@@ -299,6 +336,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
e.Metrics.ShowPools.MaxWait.WithLabelValues(database, user).Set(float64(maxWait))
e.Metrics.ShowPools.MaxWaitUs.WithLabelValues(database, user).Set(float64(maxWaitUs))
e.Metrics.ShowPools.PoolMode.WithLabelValues(database, user).Set(float64(poolModeToInt(poolMode)))
+ e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts))
}
e.Metrics.ShowPools.ClActive.Collect(ch)
@@ -316,6 +354,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
e.Metrics.ShowPools.MaxWait.Collect(ch)
e.Metrics.ShowPools.MaxWaitUs.Collect(ch)
e.Metrics.ShowPools.PoolMode.Collect(ch)
+ e.Metrics.ShowPools.LoadBalanceHosts.Collect(ch)
if err = rows.Err(); err != nil {
e.Metrics.Error.Set(1)
diff --git a/pkg/management/pgbouncer/metricsserver/stats.go b/pkg/management/pgbouncer/metricsserver/stats.go
index 4bad1c4075..1001699f79 100644
--- a/pkg/management/pgbouncer/metricsserver/stats.go
+++ b/pkg/management/pgbouncer/metricsserver/stats.go
@@ -25,7 +25,10 @@ import (
// ShowStatsMetrics contains all the SHOW STATS Metrics
type ShowStatsMetrics struct {
- TotalServerAssigCount,
+ TotalBindCount,
+ TotalClientParseCount,
+ TotalServerAssignCount,
+ TotalServerParseCount,
TotalXactCount,
TotalQueryCount,
TotalReceived,
@@ -33,7 +36,10 @@ type ShowStatsMetrics struct {
TotalXactTime,
TotalQueryTime,
TotalWaitTime,
- AvgServerAssigCount,
+ AvgBindCount,
+ AvgClientParseCount,
+ AvgServerAssignCount,
+ AvgServerParseCount,
AvgXactCount,
AvgQueryCount,
AvgRecv,
@@ -45,7 +51,10 @@ type ShowStatsMetrics struct {
// Describe produces the description for all the contained Metrics
func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) {
- r.TotalServerAssigCount.Describe(ch)
+ r.TotalBindCount.Describe(ch)
+ r.TotalClientParseCount.Describe(ch)
+ r.TotalServerAssignCount.Describe(ch)
+ r.TotalServerParseCount.Describe(ch)
r.TotalXactCount.Describe(ch)
r.TotalQueryCount.Describe(ch)
r.TotalReceived.Describe(ch)
@@ -53,7 +62,10 @@ func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) {
r.TotalXactTime.Describe(ch)
r.TotalQueryTime.Describe(ch)
r.TotalWaitTime.Describe(ch)
- r.AvgServerAssigCount.Describe(ch)
+ r.AvgBindCount.Describe(ch)
+ r.AvgClientParseCount.Describe(ch)
+ r.AvgServerAssignCount.Describe(ch)
+ r.AvgServerParseCount.Describe(ch)
r.AvgXactCount.Describe(ch)
r.AvgQueryCount.Describe(ch)
r.AvgRecv.Describe(ch)
@@ -65,7 +77,10 @@ func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) {
// Reset resets all the contained Metrics
func (r *ShowStatsMetrics) Reset() {
- r.AvgServerAssigCount.Reset()
+ r.TotalBindCount.Reset()
+ r.TotalClientParseCount.Reset()
+ r.TotalServerAssignCount.Reset()
+ r.TotalServerParseCount.Reset()
r.TotalXactCount.Reset()
r.TotalQueryCount.Reset()
r.TotalReceived.Reset()
@@ -73,7 +88,10 @@ func (r *ShowStatsMetrics) Reset() {
r.TotalXactTime.Reset()
r.TotalQueryTime.Reset()
r.TotalWaitTime.Reset()
- r.AvgServerAssigCount.Reset()
+ r.AvgBindCount.Reset()
+ r.AvgClientParseCount.Reset()
+ r.AvgServerAssignCount.Reset()
+ r.AvgServerParseCount.Reset()
r.AvgXactCount.Reset()
r.AvgQueryCount.Reset()
r.AvgRecv.Reset()
@@ -87,12 +105,31 @@ func (r *ShowStatsMetrics) Reset() {
func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics {
subsystem += "_stats"
return &ShowStatsMetrics{
- TotalServerAssigCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ TotalBindCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "total_bind_count",
+ Help: "Total number of prepared statements readied for execution by clients and forwarded to " +
+ "PostgreSQL by pgbouncer",
+ }, []string{"database"}),
+ TotalClientParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "total_client_parse_count",
+ Help: "Total number of prepared statements created by clients.",
+ }, []string{"database"}),
+ TotalServerAssignCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Subsystem: subsystem,
Name: "total_server_assignment_count",
Help: "Total number of times a server was assigned to a client.",
}, []string{"database"}),
+ TotalServerParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "total_server_parse_count",
+ Help: "Total number of prepared statements created by pgbouncer on a server.",
+ }, []string{"database"}),
TotalXactCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Subsystem: subsystem,
@@ -137,13 +174,32 @@ func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics {
Name: "total_wait_time",
Help: "Time spent by clients waiting for a server, in microseconds.",
}, []string{"database"}),
- AvgServerAssigCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ AvgBindCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "avg_bind_count",
+ Help: "Average number of prepared statements readied for execution by clients and forwarded to " +
+ "PostgreSQL by pgbouncer.",
+ }, []string{"database"}),
+ AvgClientParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "avg_client_parse_count",
+ Help: "Average number of prepared statements created by clients.",
+ }, []string{"database"}),
+ AvgServerAssignCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Subsystem: subsystem,
Name: "avg_server_assignment_count",
Help: "Average number of times a server was assigned to a client per second in " +
"the last stat period.",
}, []string{"database"}),
+ AvgServerParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: PrometheusNamespace,
+ Subsystem: subsystem,
+ Name: "avg_server_parse_count",
+ Help: "Average number of prepared statements created by pgbouncer on a server.",
+ }, []string{"database"}),
AvgXactCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: PrometheusNamespace,
Subsystem: subsystem,
@@ -230,10 +286,19 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
// PGBouncer >= 1.23.0
var (
- totalServerAssigCount,
- avgServerAssigCount int
+ totalServerAssignCount,
+ avgServerAssignCount int
)
+ // PGBouncer >= 1.24.0
+ var (
+ totalClientParseCount,
+ totalServerParseCount,
+ totalBindCount,
+ avgClientParseCount,
+ avgServerParseCount,
+ avgBindCount int
+ )
statCols, err := rows.Columns()
if err != nil {
contextLogger.Error(err, "Error while reading SHOW STATS")
@@ -244,7 +309,8 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
for rows.Next() {
var err error
- if statColsCount < 16 {
+ switch {
+ case statColsCount < 16:
err = rows.Scan(&database,
&totalXactCount,
&totalQueryCount,
@@ -261,9 +327,9 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
&avgQueryTime,
&avgWaitTime,
)
- } else {
+ case statColsCount == 17:
err = rows.Scan(&database,
- &totalServerAssigCount,
+ &totalServerAssignCount,
&totalXactCount,
&totalQueryCount,
&totalReceived,
@@ -271,7 +337,7 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
&totalXactTime,
&totalQueryTime,
&totalWaitTime,
- &avgServerAssigCount,
+ &avgServerAssignCount,
&avgXactCount,
&avgQueryCount,
&avgRecv,
@@ -280,6 +346,31 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
&avgQueryTime,
&avgWaitTime,
)
+ default:
+ err = rows.Scan(&database,
+ &totalServerAssignCount,
+ &totalXactCount,
+ &totalQueryCount,
+ &totalReceived,
+ &totalSent,
+ &totalXactTime,
+ &totalQueryTime,
+ &totalWaitTime,
+ &totalClientParseCount,
+ &totalServerParseCount,
+ &totalBindCount,
+ &avgServerAssignCount,
+ &avgXactCount,
+ &avgQueryCount,
+ &avgRecv,
+ &avgSent,
+ &avgXactTime,
+ &avgQueryTime,
+ &avgWaitTime,
+ &avgClientParseCount,
+ &avgServerParseCount,
+ &avgBindCount,
+ )
}
if err != nil {
contextLogger.Error(err, "Error while executing SHOW STATS")
@@ -302,19 +393,27 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
e.Metrics.ShowStats.AvgQueryTime.WithLabelValues(database).Set(float64(avgQueryTime))
e.Metrics.ShowStats.AvgWaitTime.WithLabelValues(database).Set(float64(avgWaitTime))
- if statColsCount >= 16 {
- e.Metrics.ShowStats.TotalServerAssigCount.WithLabelValues(database).Set(
- float64(totalServerAssigCount))
- e.Metrics.ShowStats.AvgServerAssigCount.WithLabelValues(database).Set(
- float64(avgServerAssigCount))
+ if statColsCount == 17 {
+ e.Metrics.ShowStats.TotalServerAssignCount.WithLabelValues(database).Set(
+ float64(totalServerAssignCount))
+ e.Metrics.ShowStats.AvgServerAssignCount.WithLabelValues(database).Set(
+ float64(avgServerAssignCount))
+ } else {
+ e.Metrics.ShowStats.TotalClientParseCount.WithLabelValues(database).Set(
+ float64(totalClientParseCount))
+ e.Metrics.ShowStats.TotalServerParseCount.WithLabelValues(database).Set(
+ float64(totalServerParseCount))
+ e.Metrics.ShowStats.TotalBindCount.WithLabelValues(database).Set(
+ float64(totalBindCount))
+ e.Metrics.ShowStats.AvgClientParseCount.WithLabelValues(database).Set(
+ float64(avgClientParseCount))
+ e.Metrics.ShowStats.AvgServerParseCount.WithLabelValues(database).Set(
+ float64(avgServerParseCount))
+ e.Metrics.ShowStats.AvgBindCount.WithLabelValues(database).Set(
+ float64(avgBindCount))
}
}
- if statColsCount >= 16 {
- e.Metrics.ShowStats.TotalServerAssigCount.Collect(ch)
- e.Metrics.ShowStats.AvgServerAssigCount.Collect(ch)
- }
-
e.Metrics.ShowStats.TotalXactCount.Collect(ch)
e.Metrics.ShowStats.TotalQueryCount.Collect(ch)
e.Metrics.ShowStats.TotalReceived.Collect(ch)
@@ -330,6 +429,18 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) {
e.Metrics.ShowStats.AvgQueryTime.Collect(ch)
e.Metrics.ShowStats.AvgWaitTime.Collect(ch)
+ if statColsCount == 17 {
+ e.Metrics.ShowStats.TotalServerAssignCount.Collect(ch)
+ e.Metrics.ShowStats.AvgServerAssignCount.Collect(ch)
+ } else {
+ e.Metrics.ShowStats.TotalClientParseCount.Collect(ch)
+ e.Metrics.ShowStats.TotalServerParseCount.Collect(ch)
+ e.Metrics.ShowStats.TotalBindCount.Collect(ch)
+ e.Metrics.ShowStats.AvgClientParseCount.Collect(ch)
+ e.Metrics.ShowStats.AvgServerParseCount.Collect(ch)
+ e.Metrics.ShowStats.AvgBindCount.Collect(ch)
+ }
+
if err = rows.Err(); err != nil {
e.Metrics.Error.Set(1)
e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc()
diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go
index 2f5d639502..78553d186a 100644
--- a/pkg/specs/pgbouncer/deployments.go
+++ b/pkg/specs/pgbouncer/deployments.go
@@ -39,7 +39,7 @@ import (
const (
// DefaultPgbouncerImage is the name of the pgbouncer image used by default
- DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.23.0"
+ DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.24.0"
)
// Deployment create the deployment of pgbouncer, given
From 7fc99bf8bb2b24926f5e3692f6de29547a9fdbe1 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 30 Jan 2025 09:50:13 +0100
Subject: [PATCH 319/836] chore(deps): update dependency go to v1.23.5 (main)
(#6679)
---
go.mod | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/go.mod b/go.mod
index 7ec79071cc..66a1bf84cd 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/cloudnative-pg/cloudnative-pg
go 1.23.0
-toolchain go1.23.4
+toolchain go1.23.5
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
From aad17635719942e8344907c99298acecee862b72 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 30 Jan 2025 14:56:16 +0100
Subject: [PATCH 320/836] fix(instance-manager): use pgdata content to discover
PostgreSQL version (#6659)
Use a precise check of the data directory to determine the PostgreSQL
major version during PostgreSQL configuration generation.
Closes #6658
Signed-off-by: Marco Nenciarini
Signed-off-by: Armando Ruocco
Signed-off-by: Jaime Silvela
Co-authored-by: Armando Ruocco
Co-authored-by: Jaime Silvela
---
.../cmd/manager/instance/run/lifecycle/run.go | 9 ++++---
pkg/management/postgres/configuration.go | 22 +++++++--------
pkg/management/postgres/configuration_test.go | 27 ++++++++++---------
pkg/management/postgres/utils/version.go | 21 +++++++++------
4 files changed, 44 insertions(+), 35 deletions(-)
diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go
index 6fde183430..d5d8036d5d 100644
--- a/internal/cmd/manager/instance/run/lifecycle/run.go
+++ b/internal/cmd/manager/instance/run/lifecycle/run.go
@@ -22,6 +22,7 @@ import (
"fmt"
"sync"
+ "github.com/blang/semver"
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/jackc/pgx/v5"
@@ -149,7 +150,7 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan
return nil
}
- majorVersion, err := postgresutils.GetMajorVersion(instance.PgData)
+ pgVersion, err := postgresutils.GetPgdataVersion(instance.PgData)
if err != nil {
return fmt.Errorf("while getting major version: %w", err)
}
@@ -179,7 +180,7 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan
return err
}
- err = configurePgRewindPrivileges(majorVersion, hasSuperuser, tx)
+ err = configurePgRewindPrivileges(pgVersion, hasSuperuser, tx)
if err != nil {
_ = tx.Rollback()
return err
@@ -227,10 +228,10 @@ func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) {
}
// configurePgRewindPrivileges ensures that the StreamingReplicationUser has enough rights to execute pg_rewind
-func configurePgRewindPrivileges(majorVersion int, hasSuperuser bool, tx *sql.Tx) error {
+func configurePgRewindPrivileges(pgVersion semver.Version, hasSuperuser bool, tx *sql.Tx) error {
// We need the superuser bit for the streaming-replication user since pg_rewind in PostgreSQL <= 10
// will require it.
- if majorVersion <= 10 {
+ if pgVersion.Major <= 10 {
if !hasSuperuser {
_, err := tx.Exec(fmt.Sprintf(
"ALTER USER %v SUPERUSER",
diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go
index 1cc414ea92..3c0b29e399 100644
--- a/pkg/management/postgres/configuration.go
+++ b/pkg/management/postgres/configuration.go
@@ -27,10 +27,12 @@ import (
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/postgres/version"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
+ postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/replication"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -65,11 +67,12 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster(
cluster *apiv1.Cluster,
preserveUserSettings bool,
) (bool, error) {
- postgresConfiguration, sha256, err := createPostgresqlConfiguration(cluster, preserveUserSettings)
+ pgVersion, err := postgresutils.GetPgdataVersion(instance.PgData)
if err != nil {
return false, err
}
+ postgresConfiguration, sha256 := createPostgresqlConfiguration(cluster, preserveUserSettings, pgVersion.Major)
postgresConfigurationChanged, err := InstallPgDataFileContent(
ctx,
instance.PgData,
@@ -376,16 +379,14 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool
// createPostgresqlConfiguration creates the PostgreSQL configuration to be
// used for this cluster and return it and its sha256 checksum
-func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings bool) (string, string, error) {
- // Extract the PostgreSQL major version
- fromVersion, err := cluster.GetPostgresqlVersion()
- if err != nil {
- return "", "", err
- }
-
+func createPostgresqlConfiguration(
+ cluster *apiv1.Cluster,
+ preserveUserSettings bool,
+ majorVersion uint64,
+) (string, string) {
info := postgres.ConfigurationInfo{
Settings: postgres.CnpgConfigurationSettings,
- Version: fromVersion,
+ Version: version.New(majorVersion, 0),
UserSettings: cluster.Spec.PostgresConfiguration.Parameters,
IncludingSharedPreloadLibraries: true,
AdditionalSharedPreloadLibraries: cluster.Spec.PostgresConfiguration.AdditionalLibraries,
@@ -417,8 +418,7 @@ func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings
info.RecoveryMinApplyDelay = cluster.Spec.ReplicaCluster.MinApplyDelay.Duration
}
- conf, sha256 := postgres.CreatePostgresqlConfFile(postgres.CreatePostgresqlConfiguration(info))
- return conf, sha256, nil
+ return postgres.CreatePostgresqlConfFile(postgres.CreatePostgresqlConfiguration(info))
}
// configurePostgresForImport configures Postgres to be optimized for the first import
diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go
index f4a9d2f20d..2e3b211397 100644
--- a/pkg/management/postgres/configuration_test.go
+++ b/pkg/management/postgres/configuration_test.go
@@ -21,11 +21,14 @@ import (
"strings"
"time"
+ "github.com/cloudnative-pg/machinery/pkg/image/reference"
+ "github.com/cloudnative-pg/machinery/pkg/postgres/version"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -116,6 +119,9 @@ var _ = Describe("testing the building of the ldap config string", func() {
})
var _ = Describe("Test building of the list of temporary tablespaces", func() {
+ defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
+ Expect(err).ToNot(HaveOccurred())
+
clusterWithoutTablespaces := apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "configurationTest",
@@ -166,25 +172,25 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() {
}
It("doesn't set temp_tablespaces if there are no declared tablespaces", func() {
- config, _, err := createPostgresqlConfiguration(&clusterWithoutTablespaces, true)
- Expect(err).ShouldNot(HaveOccurred())
+ config, _ := createPostgresqlConfiguration(&clusterWithoutTablespaces, true, defaultVersion.Major())
Expect(config).ToNot(ContainSubstring("temp_tablespaces"))
})
It("doesn't set temp_tablespaces if there are no temporary tablespaces", func() {
- config, _, err := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true)
- Expect(err).ShouldNot(HaveOccurred())
+ config, _ := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true, defaultVersion.Major())
Expect(config).ToNot(ContainSubstring("temp_tablespaces"))
})
It("sets temp_tablespaces when there are temporary tablespaces", func() {
- config, _, err := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true)
- Expect(err).ShouldNot(HaveOccurred())
+ config, _ := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true, defaultVersion.Major())
Expect(config).To(ContainSubstring("temp_tablespaces = 'other_temporary_tablespace,temporary_tablespace'"))
})
})
var _ = Describe("recovery_min_apply_delay", func() {
+ defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
+ Expect(err).ToNot(HaveOccurred())
+
primaryCluster := apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "configurationTest",
@@ -233,24 +239,21 @@ var _ = Describe("recovery_min_apply_delay", func() {
It("do not set recovery_min_apply_delay in primary clusters", func() {
Expect(primaryCluster.IsReplica()).To(BeFalse())
- config, _, err := createPostgresqlConfiguration(&primaryCluster, true)
- Expect(err).ShouldNot(HaveOccurred())
+ config, _ := createPostgresqlConfiguration(&primaryCluster, true, defaultVersion.Major())
Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay"))
})
It("set recovery_min_apply_delay in replica clusters when set", func() {
Expect(replicaCluster.IsReplica()).To(BeTrue())
- config, _, err := createPostgresqlConfiguration(&replicaCluster, true)
- Expect(err).ShouldNot(HaveOccurred())
+ config, _ := createPostgresqlConfiguration(&replicaCluster, true, defaultVersion.Major())
Expect(config).To(ContainSubstring("recovery_min_apply_delay = '3600s'"))
})
It("do not set recovery_min_apply_delay in replica clusters when not set", func() {
Expect(replicaClusterWithNoDelay.IsReplica()).To(BeTrue())
- config, _, err := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true)
- Expect(err).ShouldNot(HaveOccurred())
+ config, _ := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true, defaultVersion.Major())
Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay"))
})
})
diff --git a/pkg/management/postgres/utils/version.go b/pkg/management/postgres/utils/version.go
index 23f99846e0..348a132dc3 100644
--- a/pkg/management/postgres/utils/version.go
+++ b/pkg/management/postgres/utils/version.go
@@ -38,25 +38,30 @@ func GetPgVersion(db *sql.DB) (*semver.Version, error) {
}
func parseVersionNum(versionNum string) (*semver.Version, error) {
- versionInt, err := strconv.Atoi(versionNum)
+ versionInt, err := strconv.ParseUint(versionNum, 10, 64)
if err != nil {
return nil, err
}
return &semver.Version{
- Major: uint64(versionInt / 10000), //nolint:gosec
- Minor: uint64((versionInt / 100) % 100), //nolint:gosec
- Patch: uint64(versionInt % 100), //nolint:gosec
+ Major: versionInt / 10000,
+ Minor: (versionInt / 100) % 100,
+ Patch: versionInt % 100,
}, nil
}
-// GetMajorVersion read the PG_VERSION file in the data directory
+// GetPgdataVersion read the PG_VERSION file in the data directory
// returning the major version of the database
-func GetMajorVersion(pgData string) (int, error) {
+func GetPgdataVersion(pgData string) (semver.Version, error) {
content, err := os.ReadFile(path.Join(pgData, "PG_VERSION")) // #nosec
if err != nil {
- return 0, err
+ return semver.Version{}, err
}
- return strconv.Atoi(strings.TrimSpace(string(content)))
+ major, err := strconv.ParseUint(strings.TrimSpace(string(content)), 10, 64)
+ if err != nil {
+ return semver.Version{}, err
+ }
+
+ return semver.Version{Major: major}, nil
}
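As a worked sketch of the arithmetic above: `parseVersionNum` decodes the
integer `server_version_num` reported by a running server, while the
`PG_VERSION` file read by `GetPgdataVersion` holds only the major version,
which is why only `Major` is filled in there (assuming `$PGDATA` points at
the data directory):

```sh
# Mirrors parseVersionNum: 170002 -> 17.0.2
v=170002
echo "$(( v / 10000 )).$(( (v / 100) % 100 )).$(( v % 100 ))"
# PG_VERSION contains only the major version, e.g.:
cat "$PGDATA/PG_VERSION"   # prints: 17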
From 0f373b985f1adb3bdeef44d93410b8dc34044650 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 30 Jan 2025 16:40:15 +0100
Subject: [PATCH 321/836] fix: initialize `override.conf` before invoking
`pg_rewind` (#6670)
This patch ensures `override.conf` is correctly initialized before invoking
`pg_rewind`, resolving an issue where configuration misalignment could cause
failures during the demotion of a former primary.
Closes #6669
Signed-off-by: Marco Nenciarini
---
internal/management/controller/instance_startup.go | 9 ++-------
pkg/management/postgres/instance.go | 10 +++++++---
2 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go
index c5c8ffcf12..a2a593cd39 100644
--- a/internal/management/controller/instance_startup.go
+++ b/internal/management/controller/instance_startup.go
@@ -33,6 +33,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/controller"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils"
postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
)
@@ -237,7 +238,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
return err
}
- pgVersion, err := cluster.GetPostgresqlVersion()
+ pgVersion, err := utils.GetPgdataVersion(r.instance.PgData)
if err != nil {
return err
}
@@ -262,12 +263,6 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
return fmt.Errorf("while ensuring all WAL files are archived: %w", err)
}
- // pg_rewind could require a clean shutdown of the old primary to
- // work. Unfortunately, if the old primary is already clean starting
- // it up may make it advance in respect to the new one.
- // The only way to check if we really need to start it up before
- // invoking pg_rewind is to try using pg_rewind and, on failures,
- // retrying after having started up the instance.
err = r.instance.Rewind(ctx, pgVersion)
if err != nil {
return fmt.Errorf("while exucuting pg_rewind: %w", err)
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go
index 2a207a63e4..f19fa468c5 100644
--- a/pkg/management/postgres/instance.go
+++ b/pkg/management/postgres/instance.go
@@ -36,7 +36,6 @@ import (
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility"
"github.com/cloudnative-pg/machinery/pkg/log"
- "github.com/cloudnative-pg/machinery/pkg/postgres/version"
"go.uber.org/atomic"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
@@ -999,7 +998,7 @@ func (instance *Instance) removePgControlFileBackup() error {
// Rewind uses pg_rewind to align this data directory with the contents of the primary node.
// If postgres major version is >= 13, add "--restore-target-wal" option
-func (instance *Instance) Rewind(ctx context.Context, postgresVersion version.Data) error {
+func (instance *Instance) Rewind(ctx context.Context, postgresVersion semver.Version) error {
contextLogger := log.FromContext(ctx)
// Signal the liveness probe that we are running pg_rewind before starting postgres
@@ -1019,7 +1018,12 @@ func (instance *Instance) Rewind(ctx context.Context, postgresVersion version.Da
// As PostgreSQL 13 introduces support of restore from the WAL archive in pg_rewind,
// let’s automatically use it, if possible
- if postgresVersion.Major() >= 13 {
+ if postgresVersion.Major >= 13 {
+ // make sure restore_command is set in override.conf
+ if _, err := configurePostgresOverrideConfFile(instance.PgData, primaryConnInfo, ""); err != nil {
+ return err
+ }
+
options = append(options, "--restore-target-wal")
}
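For context, the command the instance manager ends up building is shaped
roughly like the following (illustrative only; `$PRIMARY_CONNINFO` stands in
for the connection string the operator computes):

```sh
pg_rewind --target-pgdata "$PGDATA" \
  --source-server "$PRIMARY_CONNINFO" \
  --restore-target-wal
```

With `--restore-target-wal`, `pg_rewind` fetches missing WAL segments through
the target's `restore_command`, which is why `override.conf` must be
initialized before the invocation.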
From 05087bbfe7ad678d3eb1cd1cc52d6cc7b309665a Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 30 Jan 2025 17:29:17 +0100
Subject: [PATCH 322/836] chore(deps): update kindest/node docker tag to
v1.32.1 (main) (#6689)
---
hack/e2e/run-e2e-kind.sh | 2 +-
hack/setup-cluster.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index c28579750c..129bccffff 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e"
export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false}
export BUILD_IMAGE=${BUILD_IMAGE:-false}
-KIND_NODE_DEFAULT_VERSION=v1.32.0
+KIND_NODE_DEFAULT_VERSION=v1.32.1
export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION}
export CLUSTER_ENGINE=kind
export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-}
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index a6c9c29f9e..dbcfe1f799 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then
fi
# Defaults
-KIND_NODE_DEFAULT_VERSION=v1.32.0
+KIND_NODE_DEFAULT_VERSION=v1.32.1
K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
From ff6478a3159d00e132b3dd610717f9e0371301e2 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 30 Jan 2025 23:17:34 +0100
Subject: [PATCH 323/836] fix(deps): update kubernetes patches to v0.32.1
(main) (#6711)
This PR contains the following updates:
https://redirect.github.com/kubernetes/api `v0.32.0` -> `v0.32.1`
https://redirect.github.com/kubernetes/apiextensions-apiserver `v0.32.0` -> `v0.32.1`
https://redirect.github.com/kubernetes/apimachinery `v0.32.0` -> `v0.32.1`
https://redirect.github.com/kubernetes/cli-runtime `v0.32.0` -> `v0.32.1`
https://redirect.github.com/kubernetes/client-go `v0.32.0` -> `v0.32.1`
---
go.mod | 10 +++++-----
go.sum | 20 ++++++++++----------
2 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/go.mod b/go.mod
index 66a1bf84cd..787645d0ef 100644
--- a/go.mod
+++ b/go.mod
@@ -40,11 +40,11 @@ require (
golang.org/x/term v0.28.0
google.golang.org/grpc v1.69.4
gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.32.0
- k8s.io/apiextensions-apiserver v0.32.0
- k8s.io/apimachinery v0.32.0
- k8s.io/cli-runtime v0.32.0
- k8s.io/client-go v0.32.0
+ k8s.io/api v0.32.1
+ k8s.io/apiextensions-apiserver v0.32.1
+ k8s.io/apimachinery v0.32.1
+ k8s.io/cli-runtime v0.32.1
+ k8s.io/client-go v0.32.1
k8s.io/utils v0.0.0-20241210054802-24370beab758
sigs.k8s.io/controller-runtime v0.19.4
sigs.k8s.io/yaml v1.4.0
diff --git a/go.sum b/go.sum
index 9f88f10806..dbb1a17380 100644
--- a/go.sum
+++ b/go.sum
@@ -281,16 +281,16 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
-k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
-k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0=
-k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw=
-k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
-k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c=
-k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ=
-k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
-k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
+k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
+k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw=
+k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto=
+k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM=
+k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY=
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
+k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
From fcd0b2a05429201e10e5d0059944ade0c2214095 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 31 Jan 2025 10:07:58 +0100
Subject: [PATCH 324/836] fix(deps): update all non-major go dependencies
(main) (#6690)
This PR contains the following updates:
https://github.com/evanphx/json-patch `v5.9.0` -> `v5.9.11`
https://github.com/goreleaser/goreleaser `v2.5.1` -> `v2.6.1`
https://github.com/stern/stern `v1.31.0` -> `v1.32.0`
https://github.com/grpc/grpc-go `v1.69.4` -> `v1.70.0`
---
Makefile | 2 +-
go.mod | 40 ++++++++++++------------
go.sum | 94 +++++++++++++++++++++++++++-----------------------------
3 files changed, 66 insertions(+), 70 deletions(-)
diff --git a/Makefile b/Makefile
index 607ea89f00..41c6cdf962 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,7 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.5.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
-GORELEASER_VERSION ?= v2.5.1
+GORELEASER_VERSION ?= v2.6.1
SPELLCHECK_VERSION ?= 0.46.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.39.0
diff --git a/go.mod b/go.mod
index 787645d0ef..bd79c41a71 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
module github.com/cloudnative-pg/cloudnative-pg
-go 1.23.0
-
-toolchain go1.23.5
+go 1.23.5
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
@@ -14,7 +12,7 @@ require (
github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc
github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
- github.com/evanphx/json-patch/v5 v5.9.0
+ github.com/evanphx/json-patch/v5 v5.9.11
github.com/go-logr/logr v1.4.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0
@@ -32,13 +30,13 @@ require (
github.com/robfig/cron v1.2.0
github.com/sethvargo/go-password v0.3.1
github.com/spf13/cobra v1.8.1
- github.com/stern/stern v1.31.0
+ github.com/stern/stern v1.32.0
github.com/thoas/go-funk v0.9.3
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/term v0.28.0
- google.golang.org/grpc v1.69.4
+ google.golang.org/grpc v1.70.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.1
k8s.io/apiextensions-apiserver v0.32.1
@@ -51,12 +49,12 @@ require (
)
require (
- github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/fatih/color v1.17.0 // indirect
+ github.com/fatih/color v1.18.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
@@ -68,7 +66,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
@@ -82,11 +80,11 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/moby/spdystream v0.5.0 // indirect
- github.com/moby/term v0.5.0 // indirect
+ github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
@@ -100,24 +98,24 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
- golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/crypto v0.32.0 // indirect
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
- golang.org/x/net v0.33.0 // indirect
- golang.org/x/oauth2 v0.23.0 // indirect
+ golang.org/x/net v0.34.0 // indirect
+ golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/text v0.21.0 // indirect
- golang.org/x/time v0.7.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.28.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
- google.golang.org/protobuf v1.36.1 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect
+ google.golang.org/protobuf v1.36.3 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+ k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
- sigs.k8s.io/kustomize/api v0.18.0 // indirect
- sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
+ sigs.k8s.io/kustomize/api v0.19.0 // indirect
+ sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
)
diff --git a/go.sum b/go.sum
index dbb1a17380..76ef9bb906 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
@@ -35,10 +35,10 @@ github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtz
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
-github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
-github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
@@ -65,8 +65,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -120,19 +120,18 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA=
github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
-github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
-github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -178,8 +177,8 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stern/stern v1.31.0 h1:kKHVgEmIgqbC6/sFZahUeU9TbxDH+0l3l5/ornLlQLs=
-github.com/stern/stern v1.31.0/go.mod h1:BfAeaPQhkMhQPTaFV81pS8YWCBmxg6IBL8fPGalt0qY=
+github.com/stern/stern v1.32.0 h1:xNw0CizB7/4CkWpI46cAo8tArDnS14eYKLaaDevEnrM=
+github.com/stern/stern v1.32.0/go.mod h1:Nv6yoHcb2E1HvklagJyd4rjoysJM4WxvcGVQtE651Xw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
@@ -196,16 +195,16 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
-go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
-go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
-go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
-go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
-go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
+go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -217,8 +216,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -227,10 +226,10 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -240,7 +239,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -250,8 +248,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -264,12 +262,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
-google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
-google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
+google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
+google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
+google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
+google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -293,18 +291,18 @@ k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg=
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo=
sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
-sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
-sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
-sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
-sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
+sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
+sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o=
+sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
+sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk=
sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
From c0603fa64ffc01342cb4bad2f518f6c5bfbaa8da Mon Sep 17 00:00:00 2001
From: Itay Grudev
Date: Fri, 31 Jan 2025 11:12:06 +0200
Subject: [PATCH 325/836] docs: Typo in Service Management documentation:
updateStrategy: replace (#6716)
The Service Management documentation incorrectly refers to the `replace`
option as `recreate`.
Closes #6717
Signed-off-by: Itay Grudev
---
docs/src/service_management.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/src/service_management.md b/docs/src/service_management.md
index e39357bd20..b02274e231 100644
--- a/docs/src/service_management.md
+++ b/docs/src/service_management.md
@@ -82,11 +82,11 @@ field, as it is managed by the operator.
The `updateStrategy` field allows you to control how the operator
updates a service definition. By default, the operator uses the `patch`
strategy, applying changes directly to the service.
-Alternatively, the `recreate` strategy deletes the existing service and
+Alternatively, the `replace` strategy deletes the existing service and
recreates it from the template.
!!! Warning
- The `recreate` strategy will cause a service disruption with every
+ The `replace` strategy will cause a service disruption with every
change. However, it may be necessary for modifying certain
parameters that can only be set during service creation.
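For readers following along, a minimal sketch of where `updateStrategy` sits in a `Cluster` manifest, based on the managed services API this doc describes (names and sizes are illustrative):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  managed:
    services:
      additional:
        - selectorType: rw
          # "patch" is the default; "replace" deletes and recreates the
          # Service, causing a brief disruption on every change
          updateStrategy: replace
          serviceTemplate:
            metadata:
              name: cluster-example-ext
            spec:
              type: LoadBalancer
```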
From 8787a489344c5a8fcfaea386ccc68f95ea9fc4dd Mon Sep 17 00:00:00 2001
From: Pierrick <139142330+pchovelon@users.noreply.github.com>
Date: Fri, 31 Jan 2025 10:27:19 +0100
Subject: [PATCH 326/836] docs: fix formatting of disk-full-failure section
(#6676)
Fix the rendering issue in the disk-full-failure section of the Instance Manager
documentation. The numbered list was rendered incorrectly because of a missing
blank line.
Signed-off-by: Pierrick
---
docs/src/instance_manager.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index df01927359..30dceab78d 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -264,6 +264,7 @@ That allows a human administrator to address the root cause.
In such a case, if supported by the storage class, the quickest course of action
is currently to:
+
1. Expand the storage size of the full PVC
2. Increase the size in the `Cluster` resource to the same value
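As a hedged illustration of those two steps (the PVC and cluster names are hypothetical, and the storage class must have `allowVolumeExpansion: true`):

```sh
# 1. Expand the storage size of the full PVC
kubectl patch pvc cluster-example-1 \
  -p '{"spec":{"resources":{"requests":{"storage":"20Gi"}}}}'

# 2. Increase the size in the Cluster resource to the same value
kubectl patch cluster cluster-example --type=merge \
  -p '{"spec":{"storage":{"size":"20Gi"}}}'
```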
From 9227612261c7822b444ef1dde70976c86e08d82e Mon Sep 17 00:00:00 2001
From: Josh Earlenbaugh
Date: Fri, 31 Jan 2025 04:31:38 -0500
Subject: [PATCH 327/836] docs: fixed typo in Connection Pooling documentation
(#6707)
This patch fixes an incorrect key reference in the connection pooling documentation by
replacing `server_tls_server_tls_protocols` with `server_tls_protocols`.
Signed-off-by: Josh Earlenbaugh
---
docs/src/connection_pooling.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md
index 3073f3b3f9..4b66f0564d 100644
--- a/docs/src/connection_pooling.md
+++ b/docs/src/connection_pooling.md
@@ -371,7 +371,7 @@ are the ones directly set by PgBouncer.
- [`server_reset_query_always`](https://www.pgbouncer.org/config.html#server_reset_query_always)
- [`server_round_robin`](https://www.pgbouncer.org/config.html#server_round_robin)
- [`server_tls_ciphers`](https://www.pgbouncer.org/config.html#server_tls_ciphers)
-- [`server_tls_server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols)
+- [`server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols)
- [`stats_period`](https://www.pgbouncer.org/config.html#stats_period)
- [`suspend_timeout`](https://www.pgbouncer.org/config.html#suspend_timeout)
- [`tcp_defer_accept`](https://www.pgbouncer.org/config.html#tcp_defer_accept)
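A hedged sketch of how one of these settings is applied in practice, assuming `server_tls_protocols` is passed through the Pooler's `spec.pgbouncer.parameters` map like the other entries in this list (`secure` is PgBouncer's shorthand for TLS 1.2/1.3):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example
  instances: 1
  type: rw
  pgbouncer:
    poolMode: session
    parameters:
      server_tls_protocols: secure
```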
From 52f550f46123375d22b48640c3b2fd43a3d41657 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Fri, 31 Jan 2025 10:50:21 +0100
Subject: [PATCH 328/836] docs: fix formatting of reconcilePodSpec annotation
(#6706)
This patch fixes incorrect formatting of the `reconcilePodSpec`
annotation documentation.
Signed-off-by: Marco Nenciarini
---
docs/src/labels_annotations.md | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md
index 299758c434..4ab081903d 100644
--- a/docs/src/labels_annotations.md
+++ b/docs/src/labels_annotations.md
@@ -171,17 +171,17 @@ These predefined annotations are managed by CloudNativePG.
: Current status of the PVC: `initializing`, `ready`, or `detached`.
`cnpg.io/reconcilePodSpec`
-: Annotation can be applied to a `Cluster` or `Pooler` to prevent restarts.
+: Annotation can be applied to a `Cluster` or `Pooler` to prevent restarts.
- When set to `disabled` on a `Cluster`, the operator prevents instances
- from restarting due to changes in the PodSpec. This includes changes to:
+ When set to `disabled` on a `Cluster`, the operator prevents instances
+ from restarting due to changes in the PodSpec. This includes changes to:
- - Topology or affinity
- - Scheduler
- - Volumes or containers
+ - Topology or affinity
+ - Scheduler
+ - Volumes or containers
- When set to `disabled` on a `Pooler`, the operator restricts any modifications
- to the deployment specification, except for changes to `spec.instances`.
+ When set to `disabled` on a `Pooler`, the operator restricts any modifications
+ to the deployment specification, except for changes to `spec.instances`.
`cnpg.io/reconciliationLoop`
: When set to `disabled` on a `Cluster`, the operator prevents the
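For reference, a minimal sketch of the annotation documented above, applied to a hypothetical cluster:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
  annotations:
    # Prevent instance restarts caused by PodSpec changes
    cnpg.io/reconcilePodSpec: disabled
spec:
  instances: 3
  storage:
    size: 1Gi
```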
From 0a31b921a27eadf156a78d5bce105b3eaec4a619 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 31 Jan 2025 11:48:50 +0100
Subject: [PATCH 329/836] chore(deps): update operator framework (main) (#6719)
---
Makefile | 4 ++--
config/olm-scorecard/patches/basic.config.yaml | 2 +-
config/olm-scorecard/patches/olm.config.yaml | 10 +++++-----
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/Makefile b/Makefile
index 41c6cdf962..3aad4b4d8d 100644
--- a/Makefile
+++ b/Makefile
@@ -47,8 +47,8 @@ CONTROLLER_TOOLS_VERSION ?= v0.16.5
GORELEASER_VERSION ?= v2.6.1
SPELLCHECK_VERSION ?= 0.46.0
WOKE_VERSION ?= 0.19.0
-OPERATOR_SDK_VERSION ?= v1.39.0
-OPM_VERSION ?= v1.49.0
+OPERATOR_SDK_VERSION ?= v1.39.1
+OPM_VERSION ?= v1.50.0
PREFLIGHT_VERSION ?= 1.11.1
OPENSHIFT_VERSIONS ?= v4.12-v4.18
ARCH ?= amd64
diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml
index b89ce3bf90..b9ec7c6c82 100644
--- a/config/olm-scorecard/patches/basic.config.yaml
+++ b/config/olm-scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- basic-check-spec
- image: quay.io/operator-framework/scorecard-test:v1.39.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.1
labels:
suite: basic
test: basic-check-spec-test
diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml
index 7eff5c9099..25d83f98f2 100644
--- a/config/olm-scorecard/patches/olm.config.yaml
+++ b/config/olm-scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- olm-bundle-validation
- image: quay.io/operator-framework/scorecard-test:v1.39.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.1
labels:
suite: olm
test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-validation
- image: quay.io/operator-framework/scorecard-test:v1.39.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.1
labels:
suite: olm
test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-resources
- image: quay.io/operator-framework/scorecard-test:v1.39.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.1
labels:
suite: olm
test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
entrypoint:
- scorecard-test
- olm-spec-descriptors
- image: quay.io/operator-framework/scorecard-test:v1.39.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.1
labels:
suite: olm
test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
entrypoint:
- scorecard-test
- olm-status-descriptors
- image: quay.io/operator-framework/scorecard-test:v1.39.0
+ image: quay.io/operator-framework/scorecard-test:v1.39.1
labels:
suite: olm
test: olm-status-descriptors-test
From 02e4e4dd08f5f5b342e607b6d6ad6cfc36ece4c4 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Fri, 31 Jan 2025 12:00:22 +0100
Subject: [PATCH 330/836] feat: support customizable pod patches via
annotations (#6323)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This patch enables precise customization of pod specifications through
the `cnpg.io/podPatch` annotation on clusters. By applying valid JSON
patches, users can fine-tune pod configurations, including areas not
directly configurable within the Cluster resource, offering greater
flexibility for advanced use cases.
**⚠️ WARNING:** This feature may introduce discrepancies between the
operator’s expectations and Kubernetes behavior. Use with caution and
only as a last resort.
Closes #6234
## Release notes
Added support for custom pod patches using the `cnpg.io/podPatch`
annotation, enabling user-defined pod adjustments.
---------
Signed-off-by: Armando Ruocco
Signed-off-by: Leonardo Cecchi
Signed-off-by: Marco Nenciarini
Signed-off-by: Gabriele Bartolini
Co-authored-by: Leonardo Cecchi
Co-authored-by: Marco Nenciarini
Co-authored-by: Gabriele Bartolini
---
.wordlist-en-custom.txt | 1 +
docs/src/labels_annotations.md | 13 +++
internal/controller/cluster_create.go | 2 +-
internal/controller/cluster_upgrade_test.go | 75 ++++++++++-----
internal/controller/suite_test.go | 2 +-
internal/webhook/v1/cluster_webhook.go | 32 +++++++
internal/webhook/v1/cluster_webhook_test.go | 53 ++++++++++
pkg/specs/pg_pods_test.go | 3 +-
pkg/specs/pods.go | 26 ++++-
pkg/specs/pods_test.go | 36 +++++++
pkg/utils/labels_annotations.go | 4 +
tests/e2e/pod_patch_test.go | 101 ++++++++++++++++++++
12 files changed, 319 insertions(+), 29 deletions(-)
create mode 100644 tests/e2e/pod_patch_test.go
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index b80faf7d54..6ee17eac76 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -661,6 +661,7 @@ configmapkeyselector
configmaps
configs
configurability
+congruency
conn
connectionLimit
connectionParameters
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md
index 4ab081903d..9acdbb13a6 100644
--- a/docs/src/labels_annotations.md
+++ b/docs/src/labels_annotations.md
@@ -160,6 +160,19 @@ These predefined annotations are managed by CloudNativePG.
`cnpg.io/podEnvHash`
: Deprecated, as the `cnpg.io/podSpec` annotation now also contains the pod environment.
+`cnpg.io/podPatch`
+: Annotation can be applied to a `Cluster` resource.
+
+ When set to a JSON-patch formatted value, the patch is applied to the instance Pods.
+
+ **⚠️ WARNING:** This feature may introduce discrepancies between the
+ operator’s expectations and Kubernetes behavior. Use with caution and only as a
+ last resort.
+
+ **IMPORTANT**: adding or changing this annotation won't trigger a rolling
+ deployment of the generated Pods. A rollout can be triggered manually with
+ `kubectl cnpg restart`.
+
`cnpg.io/podSpec`
: Snapshot of the `spec` of the pod generated by the operator. This annotation replaces
the old, deprecated `cnpg.io/podEnvHash` annotation.
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go
index 39029dd44f..2a88deec31 100644
--- a/internal/controller/cluster_create.go
+++ b/internal/controller/cluster_create.go
@@ -1443,7 +1443,7 @@ func findInstancePodToCreate(
if err != nil {
return nil, err
}
- return specs.PodWithExistingStorage(*cluster, serial), nil
+ return specs.PodWithExistingStorage(*cluster, serial)
}
return nil, nil
diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go
index 2037ce1245..a1540166e6 100644
--- a/internal/controller/cluster_upgrade_test.go
+++ b/internal/controller/cluster_upgrade_test.go
@@ -57,7 +57,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("will not require a restart for just created Pods", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
@@ -71,7 +72,9 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("requires rollout when running a different image name", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
+
pod.Spec.Containers[0].Image = "postgres:13.10"
status := postgres.PostgresqlStatus{
Pod: pod,
@@ -86,7 +89,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("requires rollout when a restart annotation has been added to the cluster", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
clusterRestart := cluster
clusterRestart.Annotations = make(map[string]string)
clusterRestart.Annotations[utils.ClusterRestartAnnotationName] = "now"
@@ -110,7 +114,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("should prioritize full rollout over inplace restarts", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
@@ -140,7 +145,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("requires rollout when PostgreSQL needs to be restarted", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
@@ -166,7 +172,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("requires pod rollout if executable does not have a hash", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
PendingRestart: false,
@@ -181,7 +188,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("checkPodSpecIsOutdated should not return any error", func() {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
rollout, err := checkPodSpecIsOutdated(pod, &cluster)
Expect(rollout.required).To(BeFalse())
Expect(rollout.canBeInPlace).To(BeFalse())
@@ -190,7 +198,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
})
It("checks when a rollout is needed for any reason", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
PendingRestart: true,
@@ -216,7 +225,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
When("the PodSpec annotation is not available", func() {
It("should trigger a rollout when the scheduler changes", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
cluster.Spec.SchedulerName = "newScheduler"
delete(pod.Annotations, utils.PodSpecAnnotationName)
@@ -241,7 +251,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
ImageName: "postgres:13.11",
},
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
cluster.Spec.SchedulerName = "newScheduler"
status := postgres.PostgresqlStatus{
@@ -272,7 +283,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
},
}
It("should trigger a rollout when the cluster has a Resource changed", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(clusterWithResources, 1)
+ pod, err := specs.PodWithExistingStorage(clusterWithResources, 1)
+ Expect(err).ToNot(HaveOccurred())
clusterWithResources.Spec.Resources.Limits["cpu"] = resource.MustParse("3") // was "2"
status := postgres.PostgresqlStatus{
@@ -290,7 +302,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
Expect(rollout.needsChangeOperatorImage).To(BeFalse())
})
It("should trigger a rollout when the cluster has Resources deleted from spec", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(clusterWithResources, 1)
+ pod, err := specs.PodWithExistingStorage(clusterWithResources, 1)
+ Expect(err).ToNot(HaveOccurred())
clusterWithResources.Spec.Resources = corev1.ResourceRequirements{}
status := postgres.PostgresqlStatus{
@@ -311,7 +324,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
When("the PodSpec annotation is not available", func() {
It("detects when a new custom environment variable is set", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
delete(pod.Annotations, utils.PodSpecAnnotationName)
cluster := cluster.DeepCopy()
@@ -341,7 +355,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
ImageName: "postgres:13.11",
},
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
delete(pod.Annotations, utils.PodSpecAnnotationName)
status := postgres.PostgresqlStatus{
@@ -365,7 +380,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
ImageName: "postgres:13.11",
},
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
delete(pod.Annotations, utils.PodSpecAnnotationName)
status := postgres.PostgresqlStatus{
@@ -388,7 +404,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
When("the podSpec annotation is available", func() {
It("detects when a new custom environment variable is set", func(ctx SpecContext) {
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
cluster := cluster.DeepCopy()
cluster.Spec.Env = []corev1.EnvVar{
@@ -418,7 +435,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
ImageName: "postgres:13.11",
},
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
@@ -441,7 +459,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
ImageName: "postgres:13.11",
},
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
@@ -467,7 +486,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{
Sources: []corev1.VolumeProjection{},
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
IsPodReady: true,
@@ -484,7 +504,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{
Sources: nil,
}
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
IsPodReady: true,
@@ -499,7 +520,8 @@ var _ = Describe("Pod upgrade", Ordered, func() {
It("should not require rollout if projected volume is nil",
func(ctx SpecContext) {
cluster.Spec.ProjectedVolumeTemplate = nil
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
status := postgres.PostgresqlStatus{
Pod: pod,
IsPodReady: true,
@@ -531,7 +553,9 @@ var _ = Describe("Test pod rollout due to topology", func() {
TopologySpreadConstraints: []corev1.TopologySpreadConstraint{topology},
},
}
- pod = specs.PodWithExistingStorage(*cluster, 1)
+ var err error
+ pod, err = specs.PodWithExistingStorage(*cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
})
When("the original podSpec annotation is available", func() {
@@ -596,7 +620,9 @@ var _ = Describe("Test pod rollout due to topology", func() {
It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) {
cluster.Spec.TopologySpreadConstraints = nil
- pod = specs.PodWithExistingStorage(*cluster, 1)
+ var err error
+ pod, err = specs.PodWithExistingStorage(*cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
Expect(pod.Spec.TopologySpreadConstraints).To(BeNil())
status := postgres.PostgresqlStatus{
@@ -755,7 +781,8 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func()
It("skips the rollout if the annotation that disables PodSpec reconciliation is set", func(ctx SpecContext) {
cluster.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled"
- pod := specs.PodWithExistingStorage(cluster, 1)
+ pod, err := specs.PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
cluster.Spec.SchedulerName = "newScheduler"
delete(pod.Annotations, utils.PodSpecAnnotationName)
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index ae8f8e8638..5f1396d865 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -313,7 +313,7 @@ func generateFakeClusterPods(
var pods []corev1.Pod
for idx < cluster.Spec.Instances {
idx++
- pod := specs.PodWithExistingStorage(*cluster, idx)
+ pod, _ := specs.PodWithExistingStorage(*cluster, idx)
cluster.SetInheritedDataAndOwnership(&pod.ObjectMeta)
err := c.Create(context.Background(), pod)
diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go
index fcba1b0621..dc7fd675fb 100644
--- a/internal/webhook/v1/cluster_webhook.go
+++ b/internal/webhook/v1/cluster_webhook.go
@@ -30,6 +30,7 @@ import (
"github.com/cloudnative-pg/machinery/pkg/postgres/version"
"github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/cloudnative-pg/machinery/pkg/types"
+ jsonpatch "github.com/evanphx/json-patch/v5"
storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -46,6 +47,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -206,6 +208,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error
v.validateManagedExtensions,
v.validateResources,
v.validateHibernationAnnotation,
+ v.validatePodPatchAnnotation,
v.validatePromotionToken,
}
@@ -2350,3 +2353,32 @@ func (v *ClusterCustomValidator) validateHibernationAnnotation(r *apiv1.Cluster)
),
}
}
+
+func (v *ClusterCustomValidator) validatePodPatchAnnotation(r *apiv1.Cluster) field.ErrorList {
+ jsonPatch, ok := r.Annotations[utils.PodPatchAnnotationName]
+ if !ok {
+ return nil
+ }
+
+ if _, err := jsonpatch.DecodePatch([]byte(jsonPatch)); err != nil {
+ return field.ErrorList{
+ field.Invalid(
+ field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName),
+ jsonPatch,
+ fmt.Sprintf("error decoding JSON patch: %s", err.Error()),
+ ),
+ }
+ }
+
+ if _, err := specs.PodWithExistingStorage(*r, 1); err != nil {
+ return field.ErrorList{
+ field.Invalid(
+ field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName),
+ jsonPatch,
+ fmt.Sprintf("jsonpatch doesn't apply cleanly to the pod: %s", err.Error()),
+ ),
+ }
+ }
+
+ return nil
+}
diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go
index faa1f0f0d7..a6828768cc 100644
--- a/internal/webhook/v1/cluster_webhook_test.go
+++ b/internal/webhook/v1/cluster_webhook_test.go
@@ -4884,3 +4884,56 @@ var _ = Describe("ServiceTemplate Validation", func() {
})
})
})
+
+var _ = Describe("validatePodPatchAnnotation", func() {
+ var v *ClusterCustomValidator
+
+ It("returns nil if the annotation is not present", func() {
+ cluster := &apiv1.Cluster{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}}
+ Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil())
+ })
+
+ It("returns an error if decoding the JSON patch fails to decode", func() {
+ cluster := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.PodPatchAnnotationName: "invalid-json-patch",
+ },
+ },
+ }
+
+ errors := v.validatePodPatchAnnotation(cluster)
+ Expect(errors).To(HaveLen(1))
+ Expect(errors[0].Type).To(Equal(field.ErrorTypeInvalid))
+ Expect(errors[0].Field).To(Equal("metadata.annotations." + utils.PodPatchAnnotationName))
+ Expect(errors[0].Detail).To(ContainSubstring("error decoding JSON patch"))
+ })
+
+ It("returns an error if decoding the JSON patch fails to apply", func() {
+ cluster := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/podInvalidSection", "value": "test"}]`,
+ },
+ },
+ }
+
+ errors := v.validatePodPatchAnnotation(cluster)
+ Expect(errors).To(HaveLen(1))
+ Expect(errors[0].Type).To(Equal(field.ErrorTypeInvalid))
+ Expect(errors[0].Field).To(Equal("metadata.annotations." + utils.PodPatchAnnotationName))
+ Expect(errors[0].Detail).To(ContainSubstring("jsonpatch doesn't apply cleanly to the pod"))
+ })
+
+ It("returns nil if the JSON patch is decoded successfully", func() {
+ cluster := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/metadata/name", "value": "test"}]`,
+ },
+ },
+ }
+
+ Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil())
+ })
+})
diff --git a/pkg/specs/pg_pods_test.go b/pkg/specs/pg_pods_test.go
index f7a5794e1c..4fc732690c 100644
--- a/pkg/specs/pg_pods_test.go
+++ b/pkg/specs/pg_pods_test.go
@@ -33,7 +33,8 @@ var _ = Describe("Extract the used image name", func() {
Namespace: "default",
},
}
- pod := PodWithExistingStorage(cluster, 1)
+ pod, err := PodWithExistingStorage(cluster, 1)
+ Expect(err).ToNot(HaveOccurred())
It("extract the default image name", func() {
Expect(GetPostgresImageName(*pod)).To(Equal(configuration.Current.PostgresImageName))
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index b20b704f32..41482b5dea 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -27,6 +27,7 @@ import (
"slices"
"strconv"
+ jsonpatch "github.com/evanphx/json-patch/v5"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -433,7 +434,7 @@ func CreatePodSecurityContext(seccompProfile *corev1.SeccompProfile, user, group
}
// PodWithExistingStorage create a new instance with an existing storage
-func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod {
+func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) (*corev1.Pod, error) {
podName := GetInstanceName(cluster.Name, nodeSerial)
gracePeriod := int64(cluster.GetMaxStopDelay())
@@ -474,7 +475,28 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod {
if utils.IsAnnotationAppArmorPresent(&pod.Spec, cluster.Annotations) {
utils.AnnotateAppArmor(&pod.ObjectMeta, &pod.Spec, cluster.Annotations)
}
- return pod
+
+ if jsonPatch := cluster.Annotations[utils.PodPatchAnnotationName]; jsonPatch != "" {
+ serializedObject, err := json.Marshal(pod)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing pod to JSON: %w", err)
+ }
+ patch, err := jsonpatch.DecodePatch([]byte(jsonPatch))
+ if err != nil {
+ return nil, fmt.Errorf("while decoding JSON patch from annotation: %w", err)
+ }
+
+ serializedObject, err = patch.Apply(serializedObject)
+ if err != nil {
+ return nil, fmt.Errorf("while applying JSON patch from annotation: %w", err)
+ }
+
+ if err = json.Unmarshal(serializedObject, pod); err != nil {
+ return nil, fmt.Errorf("while deserializing pod to JSON: %w", err)
+ }
+ }
+
+ return pod, nil
}
// GetInstanceName returns a string indicating the instance name
diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go
index ff4a9c48f8..de99a9a026 100644
--- a/pkg/specs/pods_test.go
+++ b/pkg/specs/pods_test.go
@@ -26,6 +26,7 @@ import (
v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -926,3 +927,38 @@ var _ = Describe("Compute startup probe failure threshold", func() {
Expect(getFailureThreshold(31, LivenessProbePeriod)).To(BeNumerically("==", 4))
})
})
+
+var _ = Describe("PodWithExistingStorage", func() {
+ It("applies JSON patch from annotation", func() {
+ cluster := v1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cluster",
+ Namespace: "default",
+ Annotations: map[string]string{
+ utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/containers/0/image", "value": "new-image:latest"}]`, // nolint: lll
+ },
+ },
+ }
+
+ pod, err := PodWithExistingStorage(cluster, 1)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pod).NotTo(BeNil())
+ Expect(pod.Spec.Containers[0].Image).To(Equal("new-image:latest"))
+ })
+
+ It("returns error if JSON patch is invalid", func() {
+ cluster := v1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cluster",
+ Namespace: "default",
+ Annotations: map[string]string{
+ utils.PodPatchAnnotationName: `invalid-json-patch`,
+ },
+ },
+ }
+
+ _, err := PodWithExistingStorage(cluster, 1)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("while decoding JSON patch from annotation"))
+ })
+})
diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go
index 187325013f..028ab14c3d 100644
--- a/pkg/utils/labels_annotations.go
+++ b/pkg/utils/labels_annotations.go
@@ -233,6 +233,10 @@ const (
// PluginPortAnnotationName is the name of the annotation containing the
// port the plugin is listening to
PluginPortAnnotationName = MetadataNamespace + "/pluginPort"
+
+ // PodPatchAnnotationName is the name of the annotation containing the
+ // patch to apply to the pod
+ PodPatchAnnotationName = MetadataNamespace + "/podPatch"
)
type annotationStatus string
diff --git a/tests/e2e/pod_patch_test.go b/tests/e2e/pod_patch_test.go
new file mode 100644
index 0000000000..3ad5a84eb9
--- /dev/null
+++ b/tests/e2e/pod_patch_test.go
@@ -0,0 +1,101 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Pod patch", Label(tests.LabelSmoke, tests.LabelBasic), func() {
+ const (
+ sampleFile = fixturesDir + "/base/cluster-storage-class.yaml.template"
+ clusterName = "postgresql-storage-class"
+ level = tests.Lowest
+ )
+
+ var namespace string
+
+ BeforeEach(func() {
+ if testLevelEnv.Depth < int(level) {
+ Skip("Test depth is lower than the amount requested for this test")
+ }
+ })
+
+ It("use the podPatch annotation to generate Pods", func(_ SpecContext) {
+ const namespacePrefix = "cluster-patch-e2e"
+ var err error
+
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
+ Expect(err).ToNot(HaveOccurred())
+
+ AssertCreateCluster(namespace, clusterName, sampleFile, env)
+
+ By("adding the podPatch annotation", func() {
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ patchedCluster := cluster.DeepCopy()
+
+ patchedCluster.SetAnnotations(map[string]string{
+ utils.PodPatchAnnotationName: `
+ [
+ {
+ "op": "add",
+ "path": "/metadata/annotations/e2e.cnpg.io",
+ "value": "this-test"
+ }
+ ]
+ `,
+ })
+ err = env.Client.Patch(env.Ctx, patchedCluster, client.MergeFrom(cluster))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ By("deleting all the Pods", func() {
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ for i := range podList.Items {
+ err := env.Client.Delete(env.Ctx, &podList.Items[i])
+ Expect(err).ToNot(HaveOccurred())
+ }
+ })
+
+ By("waiting for the new annotation to be applied to the new Pods", func() {
+ cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ timeout := 120
+ Eventually(func(g Gomega) {
+ podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(podList.Items).To(HaveLen(cluster.Spec.Instances))
+
+ for _, pod := range podList.Items {
+ g.Expect(pod.Annotations).To(HaveKeyWithValue("e2e.cnpg.io", "this-test"))
+ }
+ }, timeout).Should(Succeed())
+ })
+ })
+})
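Putting the pieces together, a manifest-level sketch of the new annotation, mirroring the JSON patch exercised by the e2e test above (cluster name and sizes are illustrative):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
  annotations:
    cnpg.io/podPatch: |
      [
        {
          "op": "add",
          "path": "/metadata/annotations/e2e.cnpg.io",
          "value": "this-test"
        }
      ]
spec:
  instances: 3
  storage:
    size: 1Gi
```

As the documentation change notes, adding or changing the annotation does not roll out existing Pods; the patched spec takes effect on the next rollout, for example via `kubectl cnpg restart`.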
From f982fcb719b7f56dddf5163680c76da10fb1c486 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Fri, 31 Jan 2025 12:11:06 +0100
Subject: [PATCH 331/836] fix(webhook): return warnings alongside errors in
webhook responses (#6579)
This patch fixes an issue where webhook responses only included errors,
omitting warnings when both were present.
Closes #6578
Signed-off-by: Marco Nenciarini
Signed-off-by: Gabriele Quaresima
Co-authored-by: Gabriele Quaresima
---
internal/webhook/v1/cluster_webhook.go | 9 +++++----
internal/webhook/v1/scheduledbackup_webhook.go | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go
index dc7fd675fb..4be9f552b5 100644
--- a/internal/webhook/v1/cluster_webhook.go
+++ b/internal/webhook/v1/cluster_webhook.go
@@ -78,7 +78,7 @@ var _ webhook.CustomDefaulter = &ClusterCustomDefaulter{}
func (d *ClusterCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
cluster, ok := obj.(*apiv1.Cluster)
if !ok {
- return fmt.Errorf("expected an Cluster object but got %T", obj)
+ return fmt.Errorf("expected a Cluster object but got %T", obj)
}
clusterLog.Info("Defaulting for Cluster", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
@@ -113,7 +113,7 @@ func (v *ClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.O
return allWarnings, nil
}
- return nil, apierrors.NewInvalid(
+ return allWarnings, apierrors.NewInvalid(
schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
cluster.Name, allErrs)
}
@@ -142,12 +142,13 @@ func (v *ClusterCustomValidator) ValidateUpdate(
v.validate(cluster),
v.validateClusterChanges(cluster, oldCluster)...,
)
+ allWarnings := v.getAdmissionWarnings(cluster)
if len(allErrs) == 0 {
- return v.getAdmissionWarnings(cluster), nil
+ return allWarnings, nil
}
- return nil, apierrors.NewInvalid(
+ return allWarnings, apierrors.NewInvalid(
schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"},
cluster.Name, allErrs)
}
diff --git a/internal/webhook/v1/scheduledbackup_webhook.go b/internal/webhook/v1/scheduledbackup_webhook.go
index fdf6ccdbf3..4cfeb98cc7 100644
--- a/internal/webhook/v1/scheduledbackup_webhook.go
+++ b/internal/webhook/v1/scheduledbackup_webhook.go
@@ -98,7 +98,7 @@ func (v *ScheduledBackupCustomValidator) ValidateCreate(
return warnings, nil
}
- return nil, apierrors.NewInvalid(
+ return warnings, apierrors.NewInvalid(
schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "ScheduledBackup"},
scheduledBackup.Name, allErrs)
}
@@ -120,7 +120,7 @@ func (v *ScheduledBackupCustomValidator) ValidateUpdate(
return warnings, nil
}
- return nil, apierrors.NewInvalid(
+ return warnings, apierrors.NewInvalid(
schema.GroupKind{Group: "scheduledBackup.cnpg.io", Kind: "ScheduledBackup"},
scheduledBackup.Name, allErrs)
}
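Condensing the fix into a standalone sketch of the corrected pattern (the validator's `validate` and `getAdmissionWarnings` helpers live elsewhere in the same file and are elided here):

```go
import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// ValidateCreate now returns the collected warnings even when validation
// errors are present, so the API server surfaces both to the client.
func (v *ClusterCustomValidator) ValidateCreate(
	_ context.Context, obj runtime.Object,
) (admission.Warnings, error) {
	cluster, ok := obj.(*apiv1.Cluster)
	if !ok {
		return nil, fmt.Errorf("expected a Cluster object but got %T", obj)
	}

	allErrs := v.validate(cluster)
	allWarnings := v.getAdmissionWarnings(cluster)
	if len(allErrs) == 0 {
		return allWarnings, nil
	}

	// Before this patch, warnings were dropped (nil) on this error path.
	return allWarnings, apierrors.NewInvalid(
		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
		cluster.Name, allErrs)
}
```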
From ccf40a135387031bea7d84eb9e32b85810f5d889 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 31 Jan 2025 13:42:14 +0100
Subject: [PATCH 332/836] chore(deps): update module
sigs.k8s.io/kustomize/kustomize/v5 to v5.6.0 (main) (#6718)
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 3aad4b4d8d..82a2d24a20 100644
--- a/Makefile
+++ b/Makefile
@@ -42,7 +42,7 @@ LOCALBIN ?= $(shell pwd)/bin
BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
-KUSTOMIZE_VERSION ?= v5.5.0
+KUSTOMIZE_VERSION ?= v5.6.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
GORELEASER_VERSION ?= v2.6.1
SPELLCHECK_VERSION ?= 0.46.0
From 83aef66ceb4f2b9bee5acd6c3b60308268b32370 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Fri, 31 Jan 2025 13:50:24 +0100
Subject: [PATCH 333/836] chore: remove unused code (#6598)
This code should have been removed in 28923eb80b190524c93334e4531259cd5f67e857
Signed-off-by: Marco Nenciarini
---
api/v1/cluster_types.go | 11 -----------
api/v1/zz_generated.deepcopy.go | 16 ----------------
2 files changed, 27 deletions(-)
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index cc14768848..a32b66d91e 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -1952,17 +1952,6 @@ type AffinityConfiguration struct {
AdditionalPodAffinity *corev1.PodAffinity `json:"additionalPodAffinity,omitempty"`
}
-// RollingUpdateStatus contains the information about an instance which is
-// being updated
-type RollingUpdateStatus struct {
- // The image which we put into the Pod
- ImageName string `json:"imageName"`
-
- // When the update has been started
- // +optional
- StartedAt metav1.Time `json:"startedAt,omitempty"`
-}
-
// BackupTarget describes the preferred targets for a backup
type BackupTarget string
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index b4b9d5b295..c8396cfd7f 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -2523,22 +2523,6 @@ func (in *RoleConfiguration) DeepCopy() *RoleConfiguration {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RollingUpdateStatus) DeepCopyInto(out *RollingUpdateStatus) {
- *out = *in
- in.StartedAt.DeepCopyInto(&out.StartedAt)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatus.
-func (in *RollingUpdateStatus) DeepCopy() *RollingUpdateStatus {
- if in == nil {
- return nil
- }
- out := new(RollingUpdateStatus)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SQLRefs) DeepCopyInto(out *SQLRefs) {
*out = *in
From 130037a3d0ecc57df60997e50a70497626688b33 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 31 Jan 2025 15:05:24 +0100
Subject: [PATCH 334/836] chore(deps): update kubernetes csi (main) (#6697)
This PR contains the following updates:
- https://github.com/kubernetes-csi/external-provisioner: `v5.1.0` -> `v5.2.0`
- https://github.com/rook/rook: `v1.16.1` -> `v1.16.2`
---
.github/workflows/continuous-delivery.yml | 2 +-
hack/setup-cluster.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 3f8299f62b..5ae69682f1 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,7 +37,7 @@ env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.26.0"
- ROOK_VERSION: "v1.16.1"
+ ROOK_VERSION: "v1.16.2"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index dbcfe1f799..884f1e3e09 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -28,7 +28,7 @@ KIND_NODE_DEFAULT_VERSION=v1.32.1
K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0
-EXTERNAL_PROVISIONER_VERSION=v5.1.0
+EXTERNAL_PROVISIONER_VERSION=v5.2.0
EXTERNAL_RESIZER_VERSION=v1.13.1
EXTERNAL_ATTACHER_VERSION=v4.8.0
K8S_VERSION=${K8S_VERSION-}
From 62d48282bdd4c640d1af104b9cf637087148075e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 31 Jan 2025 17:39:31 +0100
Subject: [PATCH 335/836] fix(deps): update module
sigs.k8s.io/controller-runtime to v0.20.1 (main) (#6730)
---
go.mod | 3 +--
go.sum | 6 ++----
2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index bd79c41a71..86cb8cdfd2 100644
--- a/go.mod
+++ b/go.mod
@@ -44,7 +44,7 @@ require (
k8s.io/cli-runtime v0.32.1
k8s.io/client-go v0.32.1
k8s.io/utils v0.0.0-20241210054802-24370beab758
- sigs.k8s.io/controller-runtime v0.19.4
+ sigs.k8s.io/controller-runtime v0.20.1
sigs.k8s.io/yaml v1.4.0
)
@@ -99,7 +99,6 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
- golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.10.0 // indirect
diff --git a/go.sum b/go.sum
index 76ef9bb906..3dc9308dce 100644
--- a/go.sum
+++ b/go.sum
@@ -218,8 +218,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
-golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
-golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -295,8 +293,8 @@ k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8X
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo=
-sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE=
+sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
From 98e27966ddb337afe3769fcb7347f0d7deae889b Mon Sep 17 00:00:00 2001
From: thomasboussekey
Date: Tue, 4 Feb 2025 09:34:01 +0100
Subject: [PATCH 336/836] docs: add Mirakl to `ADOPTERS.md` (#6751)
Signed-off-by: thomasboussekey
Signed-off-by: thomasboussekey
Co-authored-by: Gabriele Bartolini
---
ADOPTERS.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 4a4601f02f..2936cc88b3 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -60,3 +60,4 @@ This list is sorted in chronological order, based on the submission date.
| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |
| [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. |
| [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. |
+| [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances. With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. |
From a725d1f2cc57b09bd3df4c838341ab29b3f63c9b Mon Sep 17 00:00:00 2001
From: solidDoWant
Date: Tue, 4 Feb 2025 05:41:34 -0600
Subject: [PATCH 337/836] feat: add kubernetes `client-gen` tool support
(#6695)
This patch introduces support for the Kubernetes `client-gen` tool,
enabling the automated generation of Go clients for all custom resources
defined by the operator's CRDs.
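As background, `client-gen` is driven entirely by marker comments on the API types. Below is a minimal sketch, using two hypothetical types (`Widget` and `ClusterWidget`, not part of this patch), of how the markers map to generated clients:
```go
package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +genclient
// +kubebuilder:object:root=true

// Widget is a hypothetical namespaced resource: the +genclient marker
// asks client-gen to emit a typed, namespace-scoped client for it.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}

// +genclient
// +genclient:nonNamespaced
// +kubebuilder:object:root=true

// ClusterWidget is a hypothetical cluster-scoped resource: the extra
// +genclient:nonNamespaced marker (added to ClusterImageCatalog below)
// makes the generated client omit the namespace argument.
type ClusterWidget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}
```
The `GroupVersion` -> `SchemeGroupVersion` rename in this patch follows the naming convention that `client-gen`'s generated registration code expects in a group-version package.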
Closes #6585
Signed-off-by: Fred Heinecke
Signed-off-by: Marco Nenciarini
Signed-off-by: Armando Ruocco
Co-authored-by: Marco Nenciarini
Co-authored-by: Armando Ruocco
---
api/v1/backup_funcs.go | 4 ++--
api/v1/cluster_funcs.go | 4 ++--
api/v1/clusterimagecatalog_types.go | 1 +
api/v1/groupversion_info.go | 9 +++------
internal/cmd/plugin/report/operator_utils.go | 6 +++---
internal/controller/cluster_controller.go | 4 ++--
internal/controller/cluster_create_test.go | 4 ++--
internal/controller/cluster_image.go | 2 +-
internal/controller/cluster_restore_test.go | 2 +-
internal/controller/pooler_controller.go | 2 +-
internal/controller/scheduledbackup_controller.go | 2 +-
internal/controller/suite_test.go | 6 +++---
pkg/management/client.go | 2 +-
tests/e2e/rolling_update_test.go | 2 +-
tests/utils/operator/upgrade.go | 4 ++--
15 files changed, 26 insertions(+), 28 deletions(-)
diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go
index c41e09ee12..e7ec0411ba 100644
--- a/api/v1/backup_funcs.go
+++ b/api/v1/backup_funcs.go
@@ -236,8 +236,8 @@ func (backup *Backup) GetVolumeSnapshotConfiguration(
// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object.
func (backup *Backup) EnsureGVKIsPresent() {
backup.SetGroupVersionKind(schema.GroupVersionKind{
- Group: GroupVersion.Group,
- Version: GroupVersion.Version,
+ Group: SchemeGroupVersion.Group,
+ Version: SchemeGroupVersion.Version,
Kind: BackupKind,
})
}
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index 6fa6eba800..432e46ee89 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -1397,8 +1397,8 @@ func (cluster *Cluster) GetRecoverySourcePlugin() *PluginConfiguration {
// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object.
func (cluster *Cluster) EnsureGVKIsPresent() {
cluster.SetGroupVersionKind(schema.GroupVersionKind{
- Group: GroupVersion.Group,
- Version: GroupVersion.Version,
+ Group: SchemeGroupVersion.Group,
+ Version: SchemeGroupVersion.Version,
Kind: ClusterKind,
})
}
diff --git a/api/v1/clusterimagecatalog_types.go b/api/v1/clusterimagecatalog_types.go
index 850822fbec..7f0a7dc970 100644
--- a/api/v1/clusterimagecatalog_types.go
+++ b/api/v1/clusterimagecatalog_types.go
@@ -19,6 +19,7 @@ package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient
+// +genclient:nonNamespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:storageversion
diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go
index bb665cb83b..ded686668d 100644
--- a/api/v1/groupversion_info.go
+++ b/api/v1/groupversion_info.go
@@ -14,9 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1 contains API Schema definitions for the postgresql v1 API group
-// +kubebuilder:object:generate=true
-// +groupName=postgresql.cnpg.io
package v1
import (
@@ -51,11 +48,11 @@ const (
)
var (
- // GroupVersion is group version used to register these objects
- GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}
+ // SchemeGroupVersion is group version used to register these objects
+ SchemeGroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+ SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
diff --git a/internal/cmd/plugin/report/operator_utils.go b/internal/cmd/plugin/report/operator_utils.go
index e51aea6491..a0876ed4f1 100644
--- a/internal/cmd/plugin/report/operator_utils.go
+++ b/internal/cmd/plugin/report/operator_utils.go
@@ -44,7 +44,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) (
for _, item := range mutatingWebhookConfigList.Items {
for _, webhook := range item.Webhooks {
- if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.GroupVersion.Group {
+ if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.SchemeGroupVersion.Group {
mWebhookConfig.Items = append(mWebhookConfig.Items, item)
}
}
@@ -63,7 +63,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) (
for _, item := range validatingWebhookConfigList.Items {
for _, webhook := range item.Webhooks {
- if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.GroupVersion.Group {
+ if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.SchemeGroupVersion.Group {
vWebhookConfig.Items = append(vWebhookConfig.Items, item)
}
}
@@ -79,7 +79,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) (
if len(mWebhookConfig.Items) == 0 || len(vWebhookConfig.Items) == 0 {
return nil, nil, fmt.Errorf(
"can't find the webhooks that targeting resources within the group %s",
- apiv1.GroupVersion.Group,
+ apiv1.SchemeGroupVersion.Group,
)
}
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index bc46d91bae..1c3f0c4d68 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -69,7 +69,7 @@ const (
imageCatalogKey = ".spec.imageCatalog.name"
)
-var apiGVString = apiv1.GroupVersion.String()
+var apiSGVString = apiv1.SchemeGroupVersion.String()
// errOldPrimaryDetected occurs when a primary Pod loses connectivity with the
// API server and, upon reconnection, attempts to retain its previous primary
@@ -1205,7 +1205,7 @@ func IsOwnedByCluster(obj client.Object) (string, bool) {
return "", false
}
- if owner.APIVersion != apiGVString {
+ if owner.APIVersion != apiSGVString {
return "", false
}
diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go
index d6aa79bcbf..9d40db537a 100644
--- a/internal/controller/cluster_create_test.go
+++ b/internal/controller/cluster_create_test.go
@@ -868,7 +868,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() {
cluster := apiv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: apiv1.ClusterKind,
- APIVersion: apiGVString,
+ APIVersion: apiSGVString,
},
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace},
}
@@ -1159,7 +1159,7 @@ var _ = Describe("Service Reconciling", func() {
cluster = apiv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: apiv1.ClusterKind,
- APIVersion: apiv1.GroupVersion.String(),
+ APIVersion: apiv1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go
index 547b610cc9..46f106bd53 100644
--- a/internal/controller/cluster_image.go
+++ b/internal/controller/cluster_image.go
@@ -76,7 +76,7 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C
}
apiGroup := cluster.Spec.ImageCatalogRef.APIGroup
- if apiGroup == nil || *apiGroup != apiv1.GroupVersion.Group {
+ if apiGroup == nil || *apiGroup != apiv1.SchemeGroupVersion.Group {
contextLogger.Info("Unknown catalog group")
return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError,
"Invalid image catalog group")
diff --git a/internal/controller/cluster_restore_test.go b/internal/controller/cluster_restore_test.go
index cb68fc565b..364f7aa821 100644
--- a/internal/controller/cluster_restore_test.go
+++ b/internal/controller/cluster_restore_test.go
@@ -483,7 +483,7 @@ var _ = Describe("ensureOrphanServicesAreNotPresent", func() {
Namespace: cluster.Namespace,
},
}
- cluster.TypeMeta = metav1.TypeMeta{Kind: apiv1.ClusterKind, APIVersion: apiv1.GroupVersion.String()}
+ cluster.TypeMeta = metav1.TypeMeta{Kind: apiv1.ClusterKind, APIVersion: apiv1.SchemeGroupVersion.String()}
cluster.SetInheritedDataAndOwnership(&svc.ObjectMeta)
mockCli = fake.NewClientBuilder().
WithScheme(k8scheme.BuildWithAllKnownScheme()).
diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go
index e1d343f35b..63a1651175 100644
--- a/internal/controller/pooler_controller.go
+++ b/internal/controller/pooler_controller.go
@@ -157,7 +157,7 @@ func isOwnedByPoolerKind(obj client.Object) (string, bool) {
return "", false
}
- if owner.APIVersion != apiGVString {
+ if owner.APIVersion != apiSGVString {
return "", false
}
diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go
index 8d4d3fa248..4fd0138e64 100644
--- a/internal/controller/scheduledbackup_controller.go
+++ b/internal/controller/scheduledbackup_controller.go
@@ -348,7 +348,7 @@ func (r *ScheduledBackupReconciler) SetupWithManager(
return nil
}
- if owner.APIVersion != apiGVString {
+ if owner.APIVersion != apiSGVString {
return nil
}
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 5f1396d865..2a753bfb52 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -151,7 +151,7 @@ func newFakePooler(k8sClient client.Client, cluster *apiv1.Cluster) *apiv1.Poole
// upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308
pooler.TypeMeta = metav1.TypeMeta{
Kind: apiv1.PoolerKind,
- APIVersion: apiv1.GroupVersion.String(),
+ APIVersion: apiv1.SchemeGroupVersion.String(),
}
return pooler
@@ -219,7 +219,7 @@ func newFakeCNPGCluster(
// upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308
cluster.TypeMeta = metav1.TypeMeta{
Kind: apiv1.ClusterKind,
- APIVersion: apiv1.GroupVersion.String(),
+ APIVersion: apiv1.SchemeGroupVersion.String(),
}
return cluster
@@ -270,7 +270,7 @@ func newFakeCNPGClusterWithPGWal(k8sClient client.Client, namespace string) *api
// upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308
cluster.TypeMeta = metav1.TypeMeta{
Kind: apiv1.ClusterKind,
- APIVersion: apiv1.GroupVersion.String(),
+ APIVersion: apiv1.SchemeGroupVersion.String(),
}
return cluster
diff --git a/pkg/management/client.go b/pkg/management/client.go
index 95105530f6..51f1fc02a7 100644
--- a/pkg/management/client.go
+++ b/pkg/management/client.go
@@ -73,7 +73,7 @@ func NewControllerRuntimeClient() (client.WithWatch, error) {
return nil, err
}
- mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{apiv1.GroupVersion})
+ mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{apiv1.SchemeGroupVersion})
// add here any resource that need to be registered.
objectsToRegister := []runtime.Object{
// custom resources
diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go
index 2440e0e299..aa4e6c9528 100644
--- a/tests/e2e/rolling_update_test.go
+++ b/tests/e2e/rolling_update_test.go
@@ -352,7 +352,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun
Instances: instances,
ImageCatalogRef: &apiv1.ImageCatalogRef{
TypedLocalObjectReference: corev1.TypedLocalObjectReference{
- APIGroup: &apiv1.GroupVersion.Group,
+ APIGroup: &apiv1.SchemeGroupVersion.Group,
Name: name,
Kind: "ImageCatalog",
},
diff --git a/tests/utils/operator/upgrade.go b/tests/utils/operator/upgrade.go
index e8a2e7af21..1959c2d557 100644
--- a/tests/utils/operator/upgrade.go
+++ b/tests/utils/operator/upgrade.go
@@ -104,8 +104,8 @@ func InstallLatest(
Eventually(func() error {
mapping, err := crudClient.RESTMapper().RESTMapping(
- schema.GroupKind{Group: apiv1.GroupVersion.Group, Kind: apiv1.ClusterKind},
- apiv1.GroupVersion.Version)
+ schema.GroupKind{Group: apiv1.SchemeGroupVersion.Group, Kind: apiv1.ClusterKind},
+ apiv1.SchemeGroupVersion.Version)
if err != nil {
return err
}
From 0b1d1405743da8dffe875e5717ef93a6a73afcbc Mon Sep 17 00:00:00 2001
From: Peggie
Date: Tue, 4 Feb 2025 14:46:15 +0100
Subject: [PATCH 338/836] feat: Public Cloud K8S versions update (#6595)
Update the versions used to test the operator on public cloud providers
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: public-cloud-k8s-versions-check
---
.github/aks_versions.json | 4 ++--
.github/eks_versions.json | 4 ++--
.github/kind_versions.json | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/.github/aks_versions.json b/.github/aks_versions.json
index b5a3eed520..c1825b7a7e 100644
--- a/.github/aks_versions.json
+++ b/.github/aks_versions.json
@@ -1,6 +1,6 @@
[
- "1.31.2",
- "1.30.6",
+ "1.31.3",
+ "1.30.7",
"1.29.9",
"1.28.9"
]
diff --git a/.github/eks_versions.json b/.github/eks_versions.json
index 3121122733..49228d19da 100644
--- a/.github/eks_versions.json
+++ b/.github/eks_versions.json
@@ -1,6 +1,6 @@
[
+ "1.32",
"1.31",
"1.30",
- "1.29",
- "1.28"
+ "1.29"
]
diff --git a/.github/kind_versions.json b/.github/kind_versions.json
index b39d642e5d..10e6039591 100644
--- a/.github/kind_versions.json
+++ b/.github/kind_versions.json
@@ -1,5 +1,5 @@
[
- "v1.32.0",
+ "v1.32.1",
"v1.31.4",
"v1.30.8",
"v1.29.12",
From 69a65e7eaf800b9d157dd43e29150f0acc50b3fb Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Wed, 5 Feb 2025 15:14:08 +0100
Subject: [PATCH 339/836] fix: avoid loading helper images when running tests on
kind (#6770)
Avoid loading the helper images due to a known issue that makes kind
v0.26.0 with kindest/node v1.32.1 fail when loading the images.
This issue was reported here
https://github.com/kubernetes-sigs/kind/issues/3853
Signed-off-by: Jonathan Gonzalez V.
---
hack/e2e/run-e2e-kind.sh | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index 129bccffff..a7e6f07763 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -84,7 +84,8 @@ main() {
"${HACK_DIR}/setup-cluster.sh" load
fi
- "${HACK_DIR}/setup-cluster.sh" load-helper-images
+ # Comment out when a release of kindest/node newer than v1.32.1 is available
+ # "${HACK_DIR}/setup-cluster.sh" load-helper-images
RC=0
From 80ae3c3efd6b19e0d108b47a8e7afc917c5b3fb8 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Feb 2025 09:21:08 +0100
Subject: [PATCH 340/836] fix(deps): update module
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to
v0.80.0 (main) (#6763)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 86cb8cdfd2..a1fdf634ae 100644
--- a/go.mod
+++ b/go.mod
@@ -25,7 +25,7 @@ require (
github.com/mitchellh/go-ps v1.0.0
github.com/onsi/ginkgo/v2 v2.22.2
github.com/onsi/gomega v1.36.2
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0
github.com/prometheus/client_golang v1.20.5
github.com/robfig/cron v1.2.0
github.com/sethvargo/go-password v0.3.1
diff --git a/go.sum b/go.sum
index 3dc9308dce..4353561378 100644
--- a/go.sum
+++ b/go.sum
@@ -154,8 +154,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 h1:DGv150w4UyxnjNHlkCw85R3+lspOxegtdnbpP2vKRrk=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0 h1:ckSycH7xHtpcvXsmEY/qEziRhDQKqKqbsHi9kX/BO7A=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
From 93c26736b4d63de718f8296fc66970e5d83efab0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Feb 2025 14:57:48 +0100
Subject: [PATCH 341/836] fix(deps): update module golang.org/x/term to v0.29.0
(main) (#6765)
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index a1fdf634ae..a8d260f438 100644
--- a/go.mod
+++ b/go.mod
@@ -35,7 +35,7 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/term v0.28.0
+ golang.org/x/term v0.29.0
google.golang.org/grpc v1.70.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.1
@@ -102,7 +102,7 @@ require (
golang.org/x/net v0.34.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.10.0 // indirect
- golang.org/x/sys v0.29.0 // indirect
+ golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.28.0 // indirect
diff --git a/go.sum b/go.sum
index 4353561378..6d5c36b461 100644
--- a/go.sum
+++ b/go.sum
@@ -238,10 +238,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
-golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
+golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
From ec911af1ca1275384cb4bceef894eba5da329526 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Feb 2025 16:08:35 +0100
Subject: [PATCH 342/836] chore(deps): update dependency rook/rook to v1.16.3
(main) (#6778)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 5ae69682f1..bbe6bf737a 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,7 +37,7 @@ env:
GOLANG_VERSION: "1.23.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.26.0"
- ROOK_VERSION: "v1.16.2"
+ ROOK_VERSION: "v1.16.3"
EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
From 7aaa52d557c036e59a582c7e41b611c4e4f04d5c Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 6 Feb 2025 17:49:25 +0100
Subject: [PATCH 343/836] chore: remove unused yaml files (#6436)
Some YAML files had been unused for years and were not required;
they also included and mentioned the use of kube-rbac-proxy,
which is something we don't use.
Closes #6223
Signed-off-by: Jonathan Gonzalez V.
---
config/default/kustomization.yaml | 7 +-----
config/default/manager_auth_proxy_patch.yaml | 25 -------------------
.../rbac/auth_proxy_client_clusterrole.yaml | 7 ------
config/rbac/auth_proxy_role.yaml | 13 ----------
config/rbac/auth_proxy_role_binding.yaml | 12 ---------
config/rbac/auth_proxy_service.yaml | 14 -----------
config/rbac/kustomization.yaml | 9 +------
7 files changed, 2 insertions(+), 85 deletions(-)
delete mode 100644 config/default/manager_auth_proxy_patch.yaml
delete mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml
delete mode 100644 config/rbac/auth_proxy_role.yaml
delete mode 100644 config/rbac/auth_proxy_role_binding.yaml
delete mode 100644 config/rbac/auth_proxy_service.yaml
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 9a299052ba..079ffd5c70 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -22,7 +22,7 @@ resources:
- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
-# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
+# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patches:
@@ -40,11 +40,6 @@ patches:
name: controller-manager
version: v1
-# Protect the /metrics endpoint by putting it behind auth.
-# If you want your controller-manager to expose the /metrics
-# endpoint w/o any authn/z, please comment the following line.
-#- manager_auth_proxy_patch.yaml
-
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
deleted file mode 100644
index 43095c5fb2..0000000000
--- a/config/default/manager_auth_proxy_patch.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-# This patch inject a sidecar container which is a HTTP proxy for the controller manager,
-# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: controller-manager
- namespace: system
-spec:
- template:
- spec:
- containers:
- - name: manager
- args:
- - "--metrics-bind-address=127.0.0.1:8080"
- - "--leader-elect"
- - name: kube-rbac-proxy
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
- args:
- - "--secure-listen-address=0.0.0.0:8443"
- - "--upstream=http://127.0.0.1:8080/"
- - "--logtostderr=true"
- - "--v=10"
- ports:
- - containerPort: 8443
- name: https
diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml
deleted file mode 100644
index bd4af137a9..0000000000
--- a/config/rbac/auth_proxy_client_clusterrole.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: metrics-reader
-rules:
-- nonResourceURLs: ["/metrics"]
- verbs: ["get"]
diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml
deleted file mode 100644
index 618f5e4177..0000000000
--- a/config/rbac/auth_proxy_role.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: proxy-role
-rules:
-- apiGroups: ["authentication.k8s.io"]
- resources:
- - tokenreviews
- verbs: ["create"]
-- apiGroups: ["authorization.k8s.io"]
- resources:
- - subjectaccessreviews
- verbs: ["create"]
diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml
deleted file mode 100644
index 46f50c4d66..0000000000
--- a/config/rbac/auth_proxy_role_binding.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: proxy-rolebinding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: proxy-role
-subjects:
-- kind: ServiceAccount
- name: cnpg-manager
- namespace: cnpg-system
diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml
deleted file mode 100644
index 1a0b3a02e0..0000000000
--- a/config/rbac/auth_proxy_service.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- app.kubernetes.io/name: cloudnative-pg
- name: controller-manager-metrics-service
- namespace: system
-spec:
- ports:
- - name: https
- port: 8443
- targetPort: https
- selector:
- app.kubernetes.io/name: cloudnative-pg
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
index a561c73dc9..3d9a82e989 100644
--- a/config/rbac/kustomization.yaml
+++ b/config/rbac/kustomization.yaml
@@ -3,13 +3,7 @@ kind: Kustomization
resources:
- role.yaml
- role_binding.yaml
-# Comment the following 4 lines if you want to disable
-# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
-# which protects your /metrics endpoint.
-#- auth_proxy_service.yaml
-#- auth_proxy_role.yaml
-#- auth_proxy_role_binding.yaml
-#- auth_proxy_client_clusterrole.yaml
+
# For each CRD, "Editor" and "Viewer" roles are scaffolded by
# default, aiding admins in cluster management. Those roles are
# not used by the Project itself. You can comment the following lines
@@ -20,4 +14,3 @@ resources:
- publication_viewer_role.yaml
- database_editor_role.yaml
- database_viewer_role.yaml
-
From f44d4e829f6dc191f9642fd80ed3723d3a3c9307 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 11 Feb 2025 11:58:33 +0100
Subject: [PATCH 344/836] chore(deps): update dependency vmware-tanzu/velero to
v1.15.2 (main) (#6811)
---
.github/workflows/continuous-delivery.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index bbe6bf737a..6db7e8e51a 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1346,7 +1346,7 @@ jobs:
name: Setup Velero
uses: nick-fields/retry@v3
env:
- VELERO_VERSION: "v1.15.1"
+ VELERO_VERSION: "v1.15.2"
VELERO_AWS_PLUGIN_VERSION: "v1.11.1"
with:
timeout_minutes: 10
From 14c1a0382747c5e22f1b3d77353c9f9a12fd6ae0 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Tue, 11 Feb 2025 15:59:55 +0100
Subject: [PATCH 345/836] chore: introduce the `no-stale` label to issues
(#6817)
Use the `no-stale` label to control which issues should be exempted
from being considered inactive.
Signed-off-by: Gabriele Bartolini
---
.github/workflows/close-inactive-issues.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index 3ee8af2c20..f24654a23f 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -21,3 +21,4 @@ jobs:
days-before-pr-stale: -1
days-before-pr-close: -1
ascending: true
+ exempt-issue-labels: "no-stale"
From 1ec07e0e5d38e9c972082a5856079e3125b8eb32 Mon Sep 17 00:00:00 2001
From: Tao Li
Date: Tue, 11 Feb 2025 23:28:06 +0800
Subject: [PATCH 346/836] fix(cnpg-plugin): collect logs from all containers,
including sidecars, in instance pods (#6636)
Fixes an issue in `kubectl cnpg report --logs ` where sidecar
container logs were not being collected. The update ensures all
containers in instance pods are included in the log collection process.
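For context, the client-go API this relies on returns one stream per container, so every container in the pod spec must be requested explicitly. A minimal sketch of the pattern (a hypothetical `dumpAllContainerLogs` helper, not the plugin's actual code):
```go
package main

import (
	"context"
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// dumpAllContainerLogs fetches the logs of every container declared in
// the pod spec. GetLogs only ever returns a single container's stream,
// so each container must be named via PodLogOptions.Container or the
// sidecar logs are never requested.
func dumpAllContainerLogs(ctx context.Context, cs kubernetes.Interface, pod *corev1.Pod) error {
	for _, container := range pod.Spec.Containers {
		req := cs.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
			Container: container.Name,
		})
		stream, err := req.Stream(ctx)
		if err != nil {
			return err
		}
		_, copyErr := io.Copy(os.Stdout, stream)
		_ = stream.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return nil
}
```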
Closes #6632
Signed-off-by: Tao Li
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
internal/cmd/plugin/report/logs.go | 65 ++++++++++++++++--------------
1 file changed, 35 insertions(+), 30 deletions(-)
diff --git a/internal/cmd/plugin/report/logs.go b/internal/cmd/plugin/report/logs.go
index 220c6bf3b9..e6858696d9 100644
--- a/internal/cmd/plugin/report/logs.go
+++ b/internal/cmd/plugin/report/logs.go
@@ -113,28 +113,31 @@ func streamClusterLogsToZip(
Previous: true,
}
- for _, pod := range podList.Items {
- writer, err := zipper.Create(filepath.Join(logsdir, pod.Name) + ".jsonl")
- if err != nil {
- return fmt.Errorf("could not add '%s' to zip: %w",
- filepath.Join(logsdir, pod.Name), err)
- }
- podPointer := pod
- streamPodLogs.Pod = &podPointer
+ for idx := range podList.Items {
+ pod := podList.Items[idx]
+ for _, container := range pod.Spec.Containers {
+ path := filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, container.Name))
+ writer, err := zipper.Create(path)
+ if err != nil {
+ return fmt.Errorf("could not add '%s' to zip: %w", path, err)
+ }
+ streamPodLogs.Options.Container = container.Name
+ streamPodLogs.Pod = &pod
- if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil {
- return err
- }
- // We ignore the error because it will error if there are no previous logs
- _ = streamPodLogs.Stream(ctx, writer)
- if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil {
- return err
- }
+ if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil {
+ return err
+ }
+ // We ignore the error because it will error if there are no previous logs
+ _ = streamPodLogs.Stream(ctx, writer)
+ if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil {
+ return err
+ }
- streamPodLogs.Previous = false
+ streamPodLogs.Previous = false
- if err := streamPodLogs.Stream(ctx, writer); err != nil {
- return err
+ if err := streamPodLogs.Stream(ctx, writer); err != nil {
+ return err
+ }
}
}
@@ -180,17 +183,19 @@ func streamClusterJobLogsToZip(ctx context.Context, clusterName, namespace strin
Options: podLogOptions,
Previous: false,
}
- for _, pod := range podList.Items {
- writer, err := zipper.Create(filepath.Join(logsdir, pod.Name) + ".jsonl")
- if err != nil {
- return fmt.Errorf("could not add '%s' to zip: %w",
- filepath.Join(logsdir, pod.Name), err)
- }
- podPointer := pod
- streamPodLogs.Pod = &podPointer
- err = streamPodLogs.Stream(ctx, writer)
- if err != nil {
- return err
+ for idx := range podList.Items {
+ pod := podList.Items[idx]
+ for _, container := range pod.Spec.Containers {
+ path := filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, container.Name))
+ writer, err := zipper.Create(path)
+ if err != nil {
+ return fmt.Errorf("could not add '%s' to zip: %w", path, err)
+ }
+ streamPodLogs.Options.Container = container.Name
+ streamPodLogs.Pod = &pod
+ if err = streamPodLogs.Stream(ctx, writer); err != nil {
+ return err
+ }
}
}
}
From dfb09582c37df9ee91aead7ab5ad41b46180ed15 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 11 Feb 2025 19:29:06 +0100
Subject: [PATCH 347/836] chore(deps): update module
sigs.k8s.io/controller-tools to v0.17.2 (main) (#6591)
---
Makefile | 2 +-
config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_publications.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_subscriptions.yaml | 2 +-
10 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/Makefile b/Makefile
index 82a2d24a20..9da0c2ac46 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@ LOCALBIN ?= $(shell pwd)/bin
BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.6.0
-CONTROLLER_TOOLS_VERSION ?= v0.16.5
+CONTROLLER_TOOLS_VERSION ?= v0.17.2
GORELEASER_VERSION ?= v2.6.1
SPELLCHECK_VERSION ?= 0.46.0
WOKE_VERSION ?= 0.19.0
diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml
index d4d5b3bc97..2d352fbf40 100644
--- a/config/crd/bases/postgresql.cnpg.io_backups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: backups.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
index 06d1592286..83d116861f 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: clusterimagecatalogs.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 1057f16c99..c7de1196c6 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: clusters.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml
index b9db5db349..d8ae251677 100644
--- a/config/crd/bases/postgresql.cnpg.io_databases.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: databases.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml
index cf90a01fa3..a28ad6132e 100644
--- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: imagecatalogs.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
index 6039e1e5ea..162ba3b2da 100644
--- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: poolers.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml
index 2e0fdaf0e9..bbeb13ee9e 100644
--- a/config/crd/bases/postgresql.cnpg.io_publications.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: publications.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
index 534a4e423c..6c2406a879 100644
--- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: scheduledbackups.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml
index 24a9ff12a1..e93bf37d10 100644
--- a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.5
+ controller-gen.kubebuilder.io/version: v0.17.2
name: subscriptions.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
From 3a0ec8613e6226d9f7b311547318e9e6ce0dbaa4 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 11 Feb 2025 20:19:30 +0100
Subject: [PATCH 348/836] chore(deps): update module
github.com/goreleaser/goreleaser to v2.7.0 (main) (#6818)
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 9da0c2ac46..95f7cea662 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,7 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.6.0
CONTROLLER_TOOLS_VERSION ?= v0.17.2
-GORELEASER_VERSION ?= v2.6.1
+GORELEASER_VERSION ?= v2.7.0
SPELLCHECK_VERSION ?= 0.46.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.39.1
From 4286c0f2ab954092d3af30906115cffb91440304 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 12 Feb 2025 06:53:10 +0100
Subject: [PATCH 349/836] chore(deps): update spellcheck to v0.47.0 (main)
(#6832)
---
.github/workflows/spellcheck.yml | 2 +-
Makefile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
index 27de6b2c8e..27e4d031f8 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
uses: actions/checkout@v4
- name: Spellcheck
- uses: rojopolis/spellcheck-github-actions@0.46.0
+ uses: rojopolis/spellcheck-github-actions@0.47.0
diff --git a/Makefile b/Makefile
index 95f7cea662..12313c2d47 100644
--- a/Makefile
+++ b/Makefile
@@ -45,7 +45,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions
KUSTOMIZE_VERSION ?= v5.6.0
CONTROLLER_TOOLS_VERSION ?= v0.17.2
GORELEASER_VERSION ?= v2.7.0
-SPELLCHECK_VERSION ?= 0.46.0
+SPELLCHECK_VERSION ?= 0.47.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.39.1
OPM_VERSION ?= v1.50.0
From 957082b0b367195e9ce3706f58e039e89d56cd88 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 12 Feb 2025 09:23:36 +0100
Subject: [PATCH 350/836] chore(deps): update dependency
redhat-openshift-ecosystem/openshift-preflight to v1.12.0 (main) (#6831)
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 12313c2d47..5a52830ccd 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,7 @@ SPELLCHECK_VERSION ?= 0.47.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.39.1
OPM_VERSION ?= v1.50.0
-PREFLIGHT_VERSION ?= 1.11.1
+PREFLIGHT_VERSION ?= 1.12.0
OPENSHIFT_VERSIONS ?= v4.12-v4.18
ARCH ?= amd64
From d0e8f5f60ef72b0ea7201fd9f28ac487ef016eac Mon Sep 17 00:00:00 2001
From: Daniil Zakhlystov <47750602+usernamedt@users.noreply.github.com>
Date: Wed, 12 Feb 2025 18:39:52 +0100
Subject: [PATCH 351/836] fix(pgbouncer): handle `load_balance_hosts` null
value (#6810)
When using the `SHOW POOLS` command to collect metrics for PgBouncer,
expect that `load_balance_hosts` may be NULL and ensure it is handled properly.
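In `database/sql` terms, the fix swaps a plain `int` scan target for `sql.NullInt32`, which records whether the column was NULL instead of failing the scan. A minimal sketch (a hypothetical `scanLoadBalanceHosts` helper, not the exporter's actual code):
```go
package metrics

import "database/sql"

// scanLoadBalanceHosts reads a nullable integer column. Scanning a NULL
// into a plain int makes rows.Scan return an error; sql.NullInt32
// instead captures the value together with a Valid flag.
func scanLoadBalanceHosts(rows *sql.Rows) (int32, error) {
	var loadBalanceHosts sql.NullInt32
	if err := rows.Scan(&loadBalanceHosts); err != nil {
		return 0, err
	}
	if !loadBalanceHosts.Valid {
		// Column was NULL: fall back to the zero value, which is also
		// what float64(loadBalanceHosts.Int32) yields in the patch.
		return 0, nil
	}
	return loadBalanceHosts.Int32, nil
}
```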
This issue was introduced in PR #6630.
Relates #6566
Signed-off-by: Daniil Zakhlystov
Co-authored-by: Daniil Zakhlystov
---
pkg/management/pgbouncer/metricsserver/pools.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go
index 0c7ee4ae41..d6edd77fd9 100644
--- a/pkg/management/pgbouncer/metricsserver/pools.go
+++ b/pkg/management/pgbouncer/metricsserver/pools.go
@@ -242,7 +242,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
)
// PGBouncer 1.24.0 or above
var (
- loadBalanceHosts int
+ loadBalanceHosts sql.NullInt32
)
cols, err := rows.Columns()
@@ -336,7 +336,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) {
e.Metrics.ShowPools.MaxWait.WithLabelValues(database, user).Set(float64(maxWait))
e.Metrics.ShowPools.MaxWaitUs.WithLabelValues(database, user).Set(float64(maxWaitUs))
e.Metrics.ShowPools.PoolMode.WithLabelValues(database, user).Set(float64(poolModeToInt(poolMode)))
- e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts))
+ e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts.Int32))
}
e.Metrics.ShowPools.ClActive.Collect(ch)
From 17706712785a40447ad7300a86816eb2a2c134f1 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 12 Feb 2025 22:07:32 +0100
Subject: [PATCH 352/836] chore(deps): update dependency golangci/golangci-lint
to v1.64.3 (main) (#6839)
---
.github/workflows/continuous-integration.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index bdd2c33d3c..65f145d0eb 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -17,7 +17,7 @@ on:
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.23.x"
- GOLANGCI_LINT_VERSION: "v1.63.4"
+ GOLANGCI_LINT_VERSION: "v1.64.3"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.26.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
From 6604e846f4ebb830d459038adb9400be4e9a8a7b Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 13 Feb 2025 14:12:26 +0100
Subject: [PATCH 353/836] chore(deps): update dependency golangci/golangci-lint
to v1.64.4 (main) (#6851)
---
.github/workflows/continuous-integration.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 65f145d0eb..b18a19dd7c 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -17,7 +17,7 @@ on:
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.23.x"
- GOLANGCI_LINT_VERSION: "v1.64.3"
+ GOLANGCI_LINT_VERSION: "v1.64.4"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.26.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
From 902724703b876d196c155f2da1e9c5275ea07ff2 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Mon, 17 Feb 2025 10:44:59 +0100
Subject: [PATCH 354/836] chore: fix generated files for controller-tools v0.17.2
(#6862)
The commit dfb09582c37df9ee91aead7ab5ad41b46180ed15 upgraded the
controller-tools dependency from version v0.16.5 to v0.17.2. While it
includes updates to the generated manifests, it omits the corresponding
changes to the generated DeepCopy functions.
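As background on why the generated bodies change shape: deepcopy-gen emits a plain assignment only when a shallow copy is safe, and switches to `DeepCopyInto` once the field's type contains members (maps, slices, pointers) that must not be shared. A minimal sketch with hypothetical `Ref` and `Spec` types, not the operator's:
```go
package v1

// Ref is a hypothetical type whose map field makes a shallow copy
// unsafe: two structs would share the same underlying storage.
type Ref struct {
	Name        string
	Annotations map[string]string
}

// DeepCopyInto clones the map so the copy owns its own storage; this is
// the shape of code deepcopy-gen produces for such a type.
func (in *Ref) DeepCopyInto(out *Ref) {
	*out = *in
	if in.Annotations != nil {
		out.Annotations = make(map[string]string, len(in.Annotations))
		for k, v := range in.Annotations {
			out.Annotations[k] = v
		}
	}
}

// Spec embeds Ref by value; because Ref needs a deep copy, the generated
// code calls in.Cluster.DeepCopyInto(&out.Cluster) rather than the plain
// assignment out.Cluster = in.Cluster, as seen in this diff.
type Spec struct {
	Cluster Ref
}

// DeepCopyInto for Spec, in the shape produced by deepcopy-gen.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	in.Cluster.DeepCopyInto(&out.Cluster)
}
```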
Signed-off-by: Marco Nenciarini
---
api/v1/zz_generated.deepcopy.go | 40 ++++++++++++++++-----------------
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index c8396cfd7f..bde8690abd 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -21,8 +21,6 @@ limitations under the License.
package v1
import (
- pkgapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
- "github.com/cloudnative-pg/machinery/pkg/api"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -131,7 +129,7 @@ func (in *BackupConfiguration) DeepCopyInto(out *BackupConfiguration) {
}
if in.BarmanObjectStore != nil {
in, out := &in.BarmanObjectStore, &out.BarmanObjectStore
- *out = new(pkgapi.BarmanObjectStoreConfiguration)
+ *out = new(BarmanObjectStoreConfiguration)
(*in).DeepCopyInto(*out)
}
}
@@ -238,10 +236,10 @@ func (in *BackupSnapshotStatus) DeepCopy() *BackupSnapshotStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSource) DeepCopyInto(out *BackupSource) {
*out = *in
- out.LocalObjectReference = in.LocalObjectReference
+ in.LocalObjectReference.DeepCopyInto(&out.LocalObjectReference)
if in.EndpointCA != nil {
in, out := &in.EndpointCA, &out.EndpointCA
- *out = new(api.SecretKeySelector)
+ *out = new(SecretKeySelector)
**out = **in
}
}
@@ -259,7 +257,7 @@ func (in *BackupSource) DeepCopy() *BackupSource {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = *in
- out.Cluster = in.Cluster
+ in.Cluster.DeepCopyInto(&out.Cluster)
if in.PluginConfiguration != nil {
in, out := &in.PluginConfiguration, &out.PluginConfiguration
*out = new(BackupPluginConfiguration)
@@ -293,7 +291,7 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
in.BarmanCredentials.DeepCopyInto(&out.BarmanCredentials)
if in.EndpointCA != nil {
in, out := &in.EndpointCA, &out.EndpointCA
- *out = new(api.SecretKeySelector)
+ *out = new(SecretKeySelector)
**out = **in
}
if in.StartedAt != nil {
@@ -379,7 +377,7 @@ func (in *BootstrapInitDB) DeepCopyInto(out *BootstrapInitDB) {
*out = *in
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
- *out = new(api.LocalObjectReference)
+ *out = new(LocalObjectReference)
**out = **in
}
if in.Options != nil {
@@ -444,7 +442,7 @@ func (in *BootstrapPgBaseBackup) DeepCopyInto(out *BootstrapPgBaseBackup) {
*out = *in
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
- *out = new(api.LocalObjectReference)
+ *out = new(LocalObjectReference)
**out = **in
}
}
@@ -479,7 +477,7 @@ func (in *BootstrapRecovery) DeepCopyInto(out *BootstrapRecovery) {
}
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
- *out = new(api.LocalObjectReference)
+ *out = new(LocalObjectReference)
**out = **in
}
}
@@ -715,7 +713,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
}
if in.SuperuserSecret != nil {
in, out := &in.SuperuserSecret, &out.SuperuserSecret
- *out = new(api.LocalObjectReference)
+ *out = new(LocalObjectReference)
**out = **in
}
if in.EnableSuperuserAccess != nil {
@@ -730,7 +728,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
- *out = make([]api.LocalObjectReference, len(*in))
+ *out = make([]LocalObjectReference, len(*in))
copy(*out, *in)
}
in.StorageConfiguration.DeepCopyInto(&out.StorageConfiguration)
@@ -1238,7 +1236,7 @@ func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) {
}
if in.BarmanObjectStore != nil {
in, out := &in.BarmanObjectStore, &out.BarmanObjectStore
- *out = new(pkgapi.BarmanObjectStoreConfiguration)
+ *out = new(BarmanObjectStoreConfiguration)
(*in).DeepCopyInto(*out)
}
if in.PluginConfiguration != nil {
@@ -1661,12 +1659,12 @@ func (in *MonitoringConfiguration) DeepCopyInto(out *MonitoringConfiguration) {
}
if in.CustomQueriesConfigMap != nil {
in, out := &in.CustomQueriesConfigMap, &out.CustomQueriesConfigMap
- *out = make([]api.ConfigMapKeySelector, len(*in))
+ *out = make([]ConfigMapKeySelector, len(*in))
copy(*out, *in)
}
if in.CustomQueriesSecret != nil {
in, out := &in.CustomQueriesSecret, &out.CustomQueriesSecret
- *out = make([]api.SecretKeySelector, len(*in))
+ *out = make([]SecretKeySelector, len(*in))
copy(*out, *in)
}
if in.TLSConfig != nil {
@@ -1801,7 +1799,7 @@ func (in *PgBouncerSpec) DeepCopyInto(out *PgBouncerSpec) {
*out = *in
if in.AuthQuerySecret != nil {
in, out := &in.AuthQuerySecret, &out.AuthQuerySecret
- *out = new(api.LocalObjectReference)
+ *out = new(LocalObjectReference)
**out = **in
}
if in.Parameters != nil {
@@ -2068,7 +2066,7 @@ func (in *PoolerSecrets) DeepCopy() *PoolerSecrets {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PoolerSpec) DeepCopyInto(out *PoolerSpec) {
*out = *in
- out.Cluster = in.Cluster
+ in.Cluster.DeepCopyInto(&out.Cluster)
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
*out = new(int32)
@@ -2494,7 +2492,7 @@ func (in *RoleConfiguration) DeepCopyInto(out *RoleConfiguration) {
*out = *in
if in.PasswordSecret != nil {
in, out := &in.PasswordSecret, &out.PasswordSecret
- *out = new(api.LocalObjectReference)
+ *out = new(LocalObjectReference)
**out = **in
}
if in.ValidUntil != nil {
@@ -2528,12 +2526,12 @@ func (in *SQLRefs) DeepCopyInto(out *SQLRefs) {
*out = *in
if in.SecretRefs != nil {
in, out := &in.SecretRefs, &out.SecretRefs
- *out = make([]api.SecretKeySelector, len(*in))
+ *out = make([]SecretKeySelector, len(*in))
copy(*out, *in)
}
if in.ConfigMapRefs != nil {
in, out := &in.ConfigMapRefs, &out.ConfigMapRefs
- *out = make([]api.ConfigMapKeySelector, len(*in))
+ *out = make([]ConfigMapKeySelector, len(*in))
copy(*out, *in)
}
}
@@ -2620,7 +2618,7 @@ func (in *ScheduledBackupSpec) DeepCopyInto(out *ScheduledBackupSpec) {
*out = new(bool)
**out = **in
}
- out.Cluster = in.Cluster
+ in.Cluster.DeepCopyInto(&out.Cluster)
if in.PluginConfiguration != nil {
in, out := &in.PluginConfiguration, &out.PluginConfiguration
*out = new(BackupPluginConfiguration)
From 5d826575479eca77567f9ae02e39d83b635909bf Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."